1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
7 Free Software Foundation, Inc.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "gimple.h"
32 #include "tree-iterator.h"
33 #include "tree-inline.h"
34 #include "langhooks.h"
35 #include "diagnostic-core.h"
36 #include "tree-flow.h"
37 #include "timevar.h"
38 #include "flags.h"
39 #include "function.h"
40 #include "expr.h"
41 #include "tree-pass.h"
42 #include "ggc.h"
43 #include "except.h"
44 #include "splay-tree.h"
45 #include "optabs.h"
46 #include "cfgloop.h"
47
48
49 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
50 phases. The first phase scans the function looking for OMP statements
51 and then for variables that must be replaced to satisfy data sharing
52 clauses. The second phase expands code for the constructs, as well as
53 re-gimplifying things when variables have been replaced with complex
54 expressions.
55
56 Final code generation is done by pass_expand_omp. The flowgraph is
57 scanned for parallel regions which are then moved to a new
58 function, to be invoked by the thread library. */
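/* As a rough illustration (not the literal output of this file, and with
   illustrative names), a simple

       #pragma omp parallel shared (n)
         body;

   ends up, after lowering and expansion, along these lines:

       struct .omp_data_s { int n; } .omp_data_o;
       .omp_data_o.n = n;
       __builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
       foo._omp_fn.0 (&.omp_data_o);
       __builtin_GOMP_parallel_end ();

   with the body outlined into foo._omp_fn.0, which reaches the shared
   variable through its .omp_data_i argument.  */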
59
60 /* Context structure. Used to store information about each parallel
61 directive in the code. */
62
63 typedef struct omp_context
64 {
65 /* This field must be at the beginning, as we do "inheritance": Some
66 callback functions for tree-inline.c (e.g., omp_copy_decl)
67 receive a copy_body_data pointer that is up-casted to an
68 omp_context pointer. */
69 copy_body_data cb;
70
71 /* The tree of contexts corresponding to the encountered constructs. */
72 struct omp_context *outer;
73 gimple stmt;
74
75 /* Map variables to fields in a structure that allows communication
76 between sending and receiving threads. */
77 splay_tree field_map;
78 tree record_type;
79 tree sender_decl;
80 tree receiver_decl;
81
82 /* These are used just by task contexts, if task firstprivate fn is
83 needed. srecord_type is used to communicate from the thread
84 that encountered the task construct to task firstprivate fn,
85 record_type is allocated by GOMP_task, initialized by task firstprivate
86 fn and passed to the task body fn. */
87 splay_tree sfield_map;
88 tree srecord_type;
89
90 /* A chain of variables to add to the top-level block surrounding the
91 construct. In the case of a parallel, this is in the child function. */
92 tree block_vars;
93
94 /* What to do with variables with implicitly determined sharing
95 attributes. */
96 enum omp_clause_default_kind default_kind;
97
98 /* Nesting depth of this context. Used to beautify error messages
99 regarding invalid gotos. The outermost ctx is depth 1, with depth 0 being
100 reserved for the main body of the function. */
101 int depth;
102
103 /* True if this parallel directive is nested within another. */
104 bool is_nested;
105 } omp_context;
106
107
108 struct omp_for_data_loop
109 {
110 tree v, n1, n2, step;
111 enum tree_code cond_code;
112 };
113
114 /* A structure describing the main elements of a parallel loop. */
115
116 struct omp_for_data
117 {
118 struct omp_for_data_loop loop;
119 tree chunk_size;
120 gimple for_stmt;
121 tree pre, iter_type;
122 int collapse;
123 bool have_nowait, have_ordered;
124 enum omp_clause_schedule_kind sched_kind;
125 struct omp_for_data_loop *loops;
126 };
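/* For instance (an informal illustration; see extract_omp_for_data for the
   exact rules), a

       #pragma omp for schedule (dynamic, 4) collapse (2)

   loop would be described with sched_kind == OMP_CLAUSE_SCHEDULE_DYNAMIC,
   chunk_size == 4 and collapse == 2, with the two nested loops recorded in
   LOOPS while LOOP summarizes the collapsed iteration space.  */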
127
128
129 static splay_tree all_contexts;
130 static int taskreg_nesting_level;
131 struct omp_region *root_omp_region;
132 static bitmap task_shared_vars;
133
134 static void scan_omp (gimple_seq, omp_context *);
135 static tree scan_omp_1_op (tree *, int *, void *);
136
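/* This fragment is meant to be pasted into the switch of a
   walk_gimple_stmt callback so that statements which merely contain other
   statements (binds, tries, catches, EH filters) are descended into
   instead of being treated as already handled.  */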
137 #define WALK_SUBSTMTS \
138 case GIMPLE_BIND: \
139 case GIMPLE_TRY: \
140 case GIMPLE_CATCH: \
141 case GIMPLE_EH_FILTER: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
145
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
147
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
150 {
151 struct walk_stmt_info wi;
152
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
156
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
158 }
159
160 static void lower_omp (gimple_seq, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
163
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
165
166 tree
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
168 {
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
171 return clauses;
172
173 return NULL_TREE;
174 }
175
176 /* Return true if CTX is for an omp parallel. */
177
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
180 {
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
182 }
183
184
185 /* Return true if CTX is for an omp task. */
186
187 static inline bool
188 is_task_ctx (omp_context *ctx)
189 {
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
191 }
192
193
194 /* Return true if CTX is for an omp parallel or omp task. */
195
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
198 {
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
201 }
202
203
204 /* Return true if REGION is a combined parallel+workshare region. */
205
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
208 {
209 return region->is_combined_parallel;
210 }
211
212
213 /* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
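/* Note that as part of this extraction the <= and >= loop conditions are
   canonicalized below into < and > by adjusting the end value by one (or by
   one element for pointer iterators), and a MINUS_EXPR increment is turned
   into a PLUS_EXPR with a negated step.  */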
215
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
219 {
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
225 location_t loc = gimple_location (for_stmt);
226
227 fd->for_stmt = for_stmt;
228 fd->pre = NULL;
229 fd->collapse = gimple_omp_for_collapse (for_stmt);
230 if (fd->collapse > 1)
231 fd->loops = loops;
232 else
233 fd->loops = &fd->loop;
234
235 fd->have_nowait = fd->have_ordered = false;
236 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
237 fd->chunk_size = NULL_TREE;
238 collapse_iter = NULL;
239 collapse_count = NULL;
240
241 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
242 switch (OMP_CLAUSE_CODE (t))
243 {
244 case OMP_CLAUSE_NOWAIT:
245 fd->have_nowait = true;
246 break;
247 case OMP_CLAUSE_ORDERED:
248 fd->have_ordered = true;
249 break;
250 case OMP_CLAUSE_SCHEDULE:
251 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
252 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
253 break;
254 case OMP_CLAUSE_COLLAPSE:
255 if (fd->collapse > 1)
256 {
257 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
258 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
259 }
260 default:
261 break;
262 }
263
264 /* FIXME: for now map schedule(auto) to schedule(static).
265 There should be analysis to determine whether all iterations
266 are approximately the same amount of work (then schedule(static)
267 is best) or if it varies (then schedule(dynamic,N) is better). */
268 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
269 {
270 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
271 gcc_assert (fd->chunk_size == NULL);
272 }
273 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
274 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
275 gcc_assert (fd->chunk_size == NULL);
276 else if (fd->chunk_size == NULL)
277 {
278 /* We only need to compute a default chunk size for ordered
279 static loops and dynamic loops. */
280 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
281 || fd->have_ordered
282 || fd->collapse > 1)
283 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
284 ? integer_zero_node : integer_one_node;
285 }
286
287 for (i = 0; i < fd->collapse; i++)
288 {
289 if (fd->collapse == 1)
290 loop = &fd->loop;
291 else if (loops != NULL)
292 loop = loops + i;
293 else
294 loop = &dummy_loop;
295
296
297 loop->v = gimple_omp_for_index (for_stmt, i);
298 gcc_assert (SSA_VAR_P (loop->v));
299 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
300 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
301 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
302 loop->n1 = gimple_omp_for_initial (for_stmt, i);
303
304 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
305 loop->n2 = gimple_omp_for_final (for_stmt, i);
306 switch (loop->cond_code)
307 {
308 case LT_EXPR:
309 case GT_EXPR:
310 break;
311 case LE_EXPR:
312 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
313 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
314 else
315 loop->n2 = fold_build2_loc (loc,
316 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
317 build_int_cst (TREE_TYPE (loop->n2), 1));
318 loop->cond_code = LT_EXPR;
319 break;
320 case GE_EXPR:
321 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
322 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
323 else
324 loop->n2 = fold_build2_loc (loc,
325 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
326 build_int_cst (TREE_TYPE (loop->n2), 1));
327 loop->cond_code = GT_EXPR;
328 break;
329 default:
330 gcc_unreachable ();
331 }
332
333 t = gimple_omp_for_incr (for_stmt, i);
334 gcc_assert (TREE_OPERAND (t, 0) == var);
335 switch (TREE_CODE (t))
336 {
337 case PLUS_EXPR:
338 case POINTER_PLUS_EXPR:
339 loop->step = TREE_OPERAND (t, 1);
340 break;
341 case MINUS_EXPR:
342 loop->step = TREE_OPERAND (t, 1);
343 loop->step = fold_build1_loc (loc,
344 NEGATE_EXPR, TREE_TYPE (loop->step),
345 loop->step);
346 break;
347 default:
348 gcc_unreachable ();
349 }
350
351 if (iter_type != long_long_unsigned_type_node)
352 {
353 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
354 iter_type = long_long_unsigned_type_node;
355 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
356 && TYPE_PRECISION (TREE_TYPE (loop->v))
357 >= TYPE_PRECISION (iter_type))
358 {
359 tree n;
360
361 if (loop->cond_code == LT_EXPR)
362 n = fold_build2_loc (loc,
363 PLUS_EXPR, TREE_TYPE (loop->v),
364 loop->n2, loop->step);
365 else
366 n = loop->n1;
367 if (TREE_CODE (n) != INTEGER_CST
368 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
369 iter_type = long_long_unsigned_type_node;
370 }
371 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
372 > TYPE_PRECISION (iter_type))
373 {
374 tree n1, n2;
375
376 if (loop->cond_code == LT_EXPR)
377 {
378 n1 = loop->n1;
379 n2 = fold_build2_loc (loc,
380 PLUS_EXPR, TREE_TYPE (loop->v),
381 loop->n2, loop->step);
382 }
383 else
384 {
385 n1 = fold_build2_loc (loc,
386 MINUS_EXPR, TREE_TYPE (loop->v),
387 loop->n2, loop->step);
388 n2 = loop->n1;
389 }
390 if (TREE_CODE (n1) != INTEGER_CST
391 || TREE_CODE (n2) != INTEGER_CST
392 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
393 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
394 iter_type = long_long_unsigned_type_node;
395 }
396 }
397
398 if (collapse_count && *collapse_count == NULL)
399 {
400 if ((i == 0 || count != NULL_TREE)
401 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
402 && TREE_CONSTANT (loop->n1)
403 && TREE_CONSTANT (loop->n2)
404 && TREE_CODE (loop->step) == INTEGER_CST)
405 {
406 tree itype = TREE_TYPE (loop->v);
407
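/* A sketch of the computation below: the iteration count is
   (step + adj + n2 - n1) / step, where adj is -1 for < loops and +1 for
   > loops, i.e. a ceiling division of the distance by the step; for
   unsigned types with a > condition both the numerator and the step are
   negated first so the truncating division still gives the right count.  */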
408 if (POINTER_TYPE_P (itype))
409 itype
410 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
411 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
412 t = fold_build2_loc (loc,
413 PLUS_EXPR, itype,
414 fold_convert_loc (loc, itype, loop->step), t);
415 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
416 fold_convert_loc (loc, itype, loop->n2));
417 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
418 fold_convert_loc (loc, itype, loop->n1));
419 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
420 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
421 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
422 fold_build1_loc (loc, NEGATE_EXPR, itype,
423 fold_convert_loc (loc, itype,
424 loop->step)));
425 else
426 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
427 fold_convert_loc (loc, itype, loop->step));
428 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
429 if (count != NULL_TREE)
430 count = fold_build2_loc (loc,
431 MULT_EXPR, long_long_unsigned_type_node,
432 count, t);
433 else
434 count = t;
435 if (TREE_CODE (count) != INTEGER_CST)
436 count = NULL_TREE;
437 }
438 else
439 count = NULL_TREE;
440 }
441 }
442
443 if (count)
444 {
445 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
446 iter_type = long_long_unsigned_type_node;
447 else
448 iter_type = long_integer_type_node;
449 }
450 else if (collapse_iter && *collapse_iter != NULL)
451 iter_type = TREE_TYPE (*collapse_iter);
452 fd->iter_type = iter_type;
453 if (collapse_iter && *collapse_iter == NULL)
454 *collapse_iter = create_tmp_var (iter_type, ".iter");
455 if (collapse_count && *collapse_count == NULL)
456 {
457 if (count)
458 *collapse_count = fold_convert_loc (loc, iter_type, count);
459 else
460 *collapse_count = create_tmp_var (iter_type, ".count");
461 }
462
463 if (fd->collapse > 1)
464 {
465 fd->loop.v = *collapse_iter;
466 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
467 fd->loop.n2 = *collapse_count;
468 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
469 fd->loop.cond_code = LT_EXPR;
470 }
471 }
472
473
474 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
475 is the immediate dominator of PAR_ENTRY_BB, return true if there
476 are no data dependencies that would prevent expanding the parallel
477 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
478
479 When expanding a combined parallel+workshare region, the call to
480 the child function may need additional arguments in the case of
481 GIMPLE_OMP_FOR regions. In some cases, these arguments are
482 computed out of variables passed in from the parent to the child
483 via 'struct .omp_data_s'. For instance:
484
485 #pragma omp parallel for schedule (guided, i * 4)
486 for (j ...)
487
488 Is lowered into:
489
490 # BLOCK 2 (PAR_ENTRY_BB)
491 .omp_data_o.i = i;
492 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
493
494 # BLOCK 3 (WS_ENTRY_BB)
495 .omp_data_i = &.omp_data_o;
496 D.1667 = .omp_data_i->i;
497 D.1598 = D.1667 * 4;
498 #pragma omp for schedule (guided, D.1598)
499
500 When we outline the parallel region, the call to the child function
501 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
502 that value is computed *after* the call site. So, in principle we
503 cannot do the transformation.
504
505 To see whether the code in WS_ENTRY_BB blocks the combined
506 parallel+workshare call, we collect all the variables used in the
507 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
508 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
509 call.
510
511 FIXME. If we had the SSA form built at this point, we could merely
512 hoist the code in block 3 into block 2 and be done with it. But at
513 this point we don't have dataflow information and though we could
514 hack something up here, it is really not worth the aggravation. */
515
516 static bool
517 workshare_safe_to_combine_p (basic_block ws_entry_bb)
518 {
519 struct omp_for_data fd;
520 gimple ws_stmt = last_stmt (ws_entry_bb);
521
522 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
523 return true;
524
525 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
526
527 extract_omp_for_data (ws_stmt, &fd, NULL);
528
529 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
530 return false;
531 if (fd.iter_type != long_integer_type_node)
532 return false;
533
534 /* FIXME. We give up too easily here. If any of these arguments
535 are not constants, they will likely involve variables that have
536 been mapped into fields of .omp_data_s for sharing with the child
537 function. With appropriate data flow, it would be possible to
538 see through this. */
539 if (!is_gimple_min_invariant (fd.loop.n1)
540 || !is_gimple_min_invariant (fd.loop.n2)
541 || !is_gimple_min_invariant (fd.loop.step)
542 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
543 return false;
544
545 return true;
546 }
547
548
549 /* Collect additional arguments needed to emit a combined
550 parallel+workshare call. WS_STMT is the workshare directive being
551 expanded. */
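/* Informal gloss of the code below: for a GIMPLE_OMP_FOR the collected
   arguments are, in order, the start, end, step and (if present) chunk
   size of the loop, converted to long; these are the extra arguments the
   combined parallel+loop entry points in libgomp (the
   GOMP_parallel_loop_*_start family) take beyond the plain parallel ones.
   For GIMPLE_OMP_SECTIONS it is just the section count.  */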
552
553 static VEC(tree,gc) *
554 get_ws_args_for (gimple ws_stmt)
555 {
556 tree t;
557 location_t loc = gimple_location (ws_stmt);
558 VEC(tree,gc) *ws_args;
559
560 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
561 {
562 struct omp_for_data fd;
563
564 extract_omp_for_data (ws_stmt, &fd, NULL);
565
566 ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));
567
568 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
569 VEC_quick_push (tree, ws_args, t);
570
571 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
572 VEC_quick_push (tree, ws_args, t);
573
574 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
575 VEC_quick_push (tree, ws_args, t);
576
577 if (fd.chunk_size)
578 {
579 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
580 VEC_quick_push (tree, ws_args, t);
581 }
582
583 return ws_args;
584 }
585 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
586 {
587 /* The number of sections is equal to the number of edges from the
588 GIMPLE_OMP_SECTIONS_SWITCH statement, excluding the edge to
589 the exit of the sections region. */
590 basic_block bb = single_succ (gimple_bb (ws_stmt));
591 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
592 ws_args = VEC_alloc (tree, gc, 1);
593 VEC_quick_push (tree, ws_args, t);
594 return ws_args;
595 }
596
597 gcc_unreachable ();
598 }
599
600
601 /* Discover whether REGION is a combined parallel+workshare region. */
602
603 static void
604 determine_parallel_type (struct omp_region *region)
605 {
606 basic_block par_entry_bb, par_exit_bb;
607 basic_block ws_entry_bb, ws_exit_bb;
608
609 if (region == NULL || region->inner == NULL
610 || region->exit == NULL || region->inner->exit == NULL
611 || region->inner->cont == NULL)
612 return;
613
614 /* We only support parallel+for and parallel+sections. */
615 if (region->type != GIMPLE_OMP_PARALLEL
616 || (region->inner->type != GIMPLE_OMP_FOR
617 && region->inner->type != GIMPLE_OMP_SECTIONS))
618 return;
619
620 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
621 WS_EXIT_BB -> PAR_EXIT_BB. */
622 par_entry_bb = region->entry;
623 par_exit_bb = region->exit;
624 ws_entry_bb = region->inner->entry;
625 ws_exit_bb = region->inner->exit;
626
627 if (single_succ (par_entry_bb) == ws_entry_bb
628 && single_succ (ws_exit_bb) == par_exit_bb
629 && workshare_safe_to_combine_p (ws_entry_bb)
630 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
631 || (last_and_only_stmt (ws_entry_bb)
632 && last_and_only_stmt (par_exit_bb))))
633 {
634 gimple ws_stmt = last_stmt (ws_entry_bb);
635
636 if (region->inner->type == GIMPLE_OMP_FOR)
637 {
638 /* If this is a combined parallel loop, we need to determine
639 whether or not to use the combined library calls. There
640 are two cases where we do not apply the transformation:
641 static loops and any kind of ordered loop. In the first
642 case, we already open code the loop so there is no need
643 to do anything else. In the latter case, the combined
644 parallel loop call would still need extra synchronization
645 to implement ordered semantics, so there would not be any
646 gain in using the combined call. */
647 tree clauses = gimple_omp_for_clauses (ws_stmt);
648 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
649 if (c == NULL
650 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
651 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
652 {
653 region->is_combined_parallel = false;
654 region->inner->is_combined_parallel = false;
655 return;
656 }
657 }
658
659 region->is_combined_parallel = true;
660 region->inner->is_combined_parallel = true;
661 region->ws_args = get_ws_args_for (ws_stmt);
662 }
663 }
664
665
666 /* Return true if EXPR is variable sized. */
667
668 static inline bool
669 is_variable_sized (const_tree expr)
670 {
671 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
672 }
673
674 /* Return true if DECL is a reference type. */
675
676 static inline bool
677 is_reference (tree decl)
678 {
679 return lang_hooks.decls.omp_privatize_by_reference (decl);
680 }
681
682 /* Lookup variables in the decl or field splay trees. The "maybe" form
683 allows the variable not to have been entered; otherwise we assert
684 that it must have been entered. */
685
686 static inline tree
687 lookup_decl (tree var, omp_context *ctx)
688 {
689 tree *n;
690 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
691 return *n;
692 }
693
694 static inline tree
695 maybe_lookup_decl (const_tree var, omp_context *ctx)
696 {
697 tree *n;
698 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
699 return n ? *n : NULL_TREE;
700 }
701
702 static inline tree
703 lookup_field (tree var, omp_context *ctx)
704 {
705 splay_tree_node n;
706 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
707 return (tree) n->value;
708 }
709
710 static inline tree
711 lookup_sfield (tree var, omp_context *ctx)
712 {
713 splay_tree_node n;
714 n = splay_tree_lookup (ctx->sfield_map
715 ? ctx->sfield_map : ctx->field_map,
716 (splay_tree_key) var);
717 return (tree) n->value;
718 }
719
720 static inline tree
721 maybe_lookup_field (tree var, omp_context *ctx)
722 {
723 splay_tree_node n;
724 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
725 return n ? (tree) n->value : NULL_TREE;
726 }
727
728 /* Return true if DECL should be copied by pointer. SHARED_CTX is
729 the parallel context if DECL is to be shared. */
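/* Informally: aggregates are always passed by reference, and for shared
   scalars we fall back to passing the address whenever copy-in/copy-out
   could be observed: statics and externals, decls with value-exprs,
   addressable decls, variables also shared by an enclosing parallel, and
   non-readonly variables in task constructs (see the cases below).  */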
730
731 static bool
732 use_pointer_for_field (tree decl, omp_context *shared_ctx)
733 {
734 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
735 return true;
736
737 /* We can only use copy-in/copy-out semantics for shared variables
738 when we know the value is not accessible from an outer scope. */
739 if (shared_ctx)
740 {
741 /* ??? Trivially accessible from anywhere. But why would we even
742 be passing an address in this case? Should we simply assert
743 this to be false, or should we have a cleanup pass that removes
744 these from the list of mappings? */
745 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
746 return true;
747
748 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
749 without analyzing the expression whether or not its location
750 is accessible to anyone else. In the case of nested parallel
751 regions it certainly may be. */
752 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
753 return true;
754
755 /* Do not use copy-in/copy-out for variables that have their
756 address taken. */
757 if (TREE_ADDRESSABLE (decl))
758 return true;
759
760 /* Disallow copy-in/out in nested parallel if
761 decl is shared in outer parallel, otherwise
762 each thread could store the shared variable
763 in its own copy-in location, making the
764 variable no longer really shared. */
765 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
766 {
767 omp_context *up;
768
769 for (up = shared_ctx->outer; up; up = up->outer)
770 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
771 break;
772
773 if (up)
774 {
775 tree c;
776
777 for (c = gimple_omp_taskreg_clauses (up->stmt);
778 c; c = OMP_CLAUSE_CHAIN (c))
779 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
780 && OMP_CLAUSE_DECL (c) == decl)
781 break;
782
783 if (c)
784 goto maybe_mark_addressable_and_ret;
785 }
786 }
787
788 /* For tasks avoid using copy-in/out, unless they are readonly
789 (in which case just copy-in is used). As tasks can be
790 deferred or executed in a different thread, when GOMP_task
791 returns, the task hasn't necessarily terminated. */
792 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
793 {
794 tree outer;
795 maybe_mark_addressable_and_ret:
796 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
797 if (is_gimple_reg (outer))
798 {
799 /* Taking address of OUTER in lower_send_shared_vars
800 might need regimplification of everything that uses the
801 variable. */
802 if (!task_shared_vars)
803 task_shared_vars = BITMAP_ALLOC (NULL);
804 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
805 TREE_ADDRESSABLE (outer) = 1;
806 }
807 return true;
808 }
809 }
810
811 return false;
812 }
813
814 /* Create a new VAR_DECL and copy information from VAR to it. */
815
816 tree
817 copy_var_decl (tree var, tree name, tree type)
818 {
819 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
820
821 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
822 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
823 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
824 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
825 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
826 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
827 TREE_USED (copy) = 1;
828 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
829
830 return copy;
831 }
832
833 /* Construct a new automatic decl similar to VAR. */
834
835 static tree
836 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
837 {
838 tree copy = copy_var_decl (var, name, type);
839
840 DECL_CONTEXT (copy) = current_function_decl;
841 DECL_CHAIN (copy) = ctx->block_vars;
842 ctx->block_vars = copy;
843
844 return copy;
845 }
846
847 static tree
848 omp_copy_decl_1 (tree var, omp_context *ctx)
849 {
850 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
851 }
852
853 /* Build tree nodes to access the field for VAR on the receiver side. */
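/* For a shared variable n this builds (illustratively) .omp_data_i->n,
   or *.omp_data_i->n when the field holds n's address (BY_REF);
   CTX->RECEIVER_DECL is the child function's .omp_data_i parameter.  */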
854
855 static tree
856 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
857 {
858 tree x, field = lookup_field (var, ctx);
859
860 /* If the receiver record type was remapped in the child function,
861 remap the field into the new record type. */
862 x = maybe_lookup_field (field, ctx);
863 if (x != NULL)
864 field = x;
865
866 x = build_simple_mem_ref (ctx->receiver_decl);
867 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
868 if (by_ref)
869 x = build_simple_mem_ref (x);
870
871 return x;
872 }
873
874 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
875 of a parallel, this is a component reference; for workshare constructs
876 this is some variable. */
877
878 static tree
879 build_outer_var_ref (tree var, omp_context *ctx)
880 {
881 tree x;
882
883 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
884 x = var;
885 else if (is_variable_sized (var))
886 {
887 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
888 x = build_outer_var_ref (x, ctx);
889 x = build_simple_mem_ref (x);
890 }
891 else if (is_taskreg_ctx (ctx))
892 {
893 bool by_ref = use_pointer_for_field (var, NULL);
894 x = build_receiver_ref (var, by_ref, ctx);
895 }
896 else if (ctx->outer)
897 x = lookup_decl (var, ctx->outer);
898 else if (is_reference (var))
899 /* This can happen with orphaned constructs. If var is reference, it is
900 possible it is shared and as such valid. */
901 x = var;
902 else
903 gcc_unreachable ();
904
905 if (is_reference (var))
906 x = build_simple_mem_ref (x);
907
908 return x;
909 }
910
911 /* Build tree nodes to access the field for VAR on the sender side. */
912
913 static tree
914 build_sender_ref (tree var, omp_context *ctx)
915 {
916 tree field = lookup_sfield (var, ctx);
917 return build3 (COMPONENT_REF, TREE_TYPE (field),
918 ctx->sender_decl, field, NULL);
919 }
920
921 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
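/* MASK encodes where the field is wanted: bit 0 selects the receiver
   record (CTX->RECORD_TYPE / FIELD_MAP), bit 1 the task-specific record
   (CTX->SRECORD_TYPE / SFIELD_MAP) used with the task firstprivate
   function, and 3 means both.  This is a reading of the assertions and
   splay-tree insertions below rather than a separately documented
   contract.  */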
922
923 static void
924 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
925 {
926 tree field, type, sfield = NULL_TREE;
927
928 gcc_assert ((mask & 1) == 0
929 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
930 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
931 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
932
933 type = TREE_TYPE (var);
934 if (by_ref)
935 type = build_pointer_type (type);
936 else if ((mask & 3) == 1 && is_reference (var))
937 type = TREE_TYPE (type);
938
939 field = build_decl (DECL_SOURCE_LOCATION (var),
940 FIELD_DECL, DECL_NAME (var), type);
941
942 /* Remember what variable this field was created for. This does have a
943 side effect of making dwarf2out ignore this member, so for helpful
944 debugging we clear it later in delete_omp_context. */
945 DECL_ABSTRACT_ORIGIN (field) = var;
946 if (type == TREE_TYPE (var))
947 {
948 DECL_ALIGN (field) = DECL_ALIGN (var);
949 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
950 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
951 }
952 else
953 DECL_ALIGN (field) = TYPE_ALIGN (type);
954
955 if ((mask & 3) == 3)
956 {
957 insert_field_into_struct (ctx->record_type, field);
958 if (ctx->srecord_type)
959 {
960 sfield = build_decl (DECL_SOURCE_LOCATION (var),
961 FIELD_DECL, DECL_NAME (var), type);
962 DECL_ABSTRACT_ORIGIN (sfield) = var;
963 DECL_ALIGN (sfield) = DECL_ALIGN (field);
964 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
965 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
966 insert_field_into_struct (ctx->srecord_type, sfield);
967 }
968 }
969 else
970 {
971 if (ctx->srecord_type == NULL_TREE)
972 {
973 tree t;
974
975 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
976 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
977 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
978 {
979 sfield = build_decl (DECL_SOURCE_LOCATION (var),
980 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
981 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
982 insert_field_into_struct (ctx->srecord_type, sfield);
983 splay_tree_insert (ctx->sfield_map,
984 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
985 (splay_tree_value) sfield);
986 }
987 }
988 sfield = field;
989 insert_field_into_struct ((mask & 1) ? ctx->record_type
990 : ctx->srecord_type, field);
991 }
992
993 if (mask & 1)
994 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
995 (splay_tree_value) field);
996 if ((mask & 2) && ctx->sfield_map)
997 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
998 (splay_tree_value) sfield);
999 }
1000
1001 static tree
1002 install_var_local (tree var, omp_context *ctx)
1003 {
1004 tree new_var = omp_copy_decl_1 (var, ctx);
1005 insert_decl_map (&ctx->cb, var, new_var);
1006 return new_var;
1007 }
1008
1009 /* Adjust the replacement for DECL in CTX for the new context. This means
1010 copying the DECL_VALUE_EXPR, and fixing up the type. */
1011
1012 static void
1013 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1014 {
1015 tree new_decl, size;
1016
1017 new_decl = lookup_decl (decl, ctx);
1018
1019 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1020
1021 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1022 && DECL_HAS_VALUE_EXPR_P (decl))
1023 {
1024 tree ve = DECL_VALUE_EXPR (decl);
1025 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1026 SET_DECL_VALUE_EXPR (new_decl, ve);
1027 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1028 }
1029
1030 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1031 {
1032 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1033 if (size == error_mark_node)
1034 size = TYPE_SIZE (TREE_TYPE (new_decl));
1035 DECL_SIZE (new_decl) = size;
1036
1037 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1038 if (size == error_mark_node)
1039 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1040 DECL_SIZE_UNIT (new_decl) = size;
1041 }
1042 }
1043
1044 /* The callback for remap_decl. Search all containing contexts for a
1045 mapping of the variable; this avoids having to duplicate the splay
1046 tree ahead of time. We know a mapping doesn't already exist in the
1047 given context. Create new mappings to implement default semantics. */
1048
1049 static tree
1050 omp_copy_decl (tree var, copy_body_data *cb)
1051 {
1052 omp_context *ctx = (omp_context *) cb;
1053 tree new_var;
1054
1055 if (TREE_CODE (var) == LABEL_DECL)
1056 {
1057 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1058 DECL_CONTEXT (new_var) = current_function_decl;
1059 insert_decl_map (&ctx->cb, var, new_var);
1060 return new_var;
1061 }
1062
1063 while (!is_taskreg_ctx (ctx))
1064 {
1065 ctx = ctx->outer;
1066 if (ctx == NULL)
1067 return var;
1068 new_var = maybe_lookup_decl (var, ctx);
1069 if (new_var)
1070 return new_var;
1071 }
1072
1073 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1074 return var;
1075
1076 return error_mark_node;
1077 }
1078
1079
1081
1082 /* Debugging dumps for parallel regions. */
1083 void dump_omp_region (FILE *, struct omp_region *, int);
1084 void debug_omp_region (struct omp_region *);
1085 void debug_all_omp_regions (void);
1086
1087 /* Dump the parallel region tree rooted at REGION. */
1088
1089 void
1090 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1091 {
1092 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1093 gimple_code_name[region->type]);
1094
1095 if (region->inner)
1096 dump_omp_region (file, region->inner, indent + 4);
1097
1098 if (region->cont)
1099 {
1100 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1101 region->cont->index);
1102 }
1103
1104 if (region->exit)
1105 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1106 region->exit->index);
1107 else
1108 fprintf (file, "%*s[no exit marker]\n", indent, "");
1109
1110 if (region->next)
1111 dump_omp_region (file, region->next, indent);
1112 }
1113
1114 DEBUG_FUNCTION void
1115 debug_omp_region (struct omp_region *region)
1116 {
1117 dump_omp_region (stderr, region, 0);
1118 }
1119
1120 DEBUG_FUNCTION void
1121 debug_all_omp_regions (void)
1122 {
1123 dump_omp_region (stderr, root_omp_region, 0);
1124 }
1125
1126
1127 /* Create a new parallel region starting at STMT inside region PARENT. */
1128
1129 struct omp_region *
1130 new_omp_region (basic_block bb, enum gimple_code type,
1131 struct omp_region *parent)
1132 {
1133 struct omp_region *region = XCNEW (struct omp_region);
1134
1135 region->outer = parent;
1136 region->entry = bb;
1137 region->type = type;
1138
1139 if (parent)
1140 {
1141 /* This is a nested region. Add it to the list of inner
1142 regions in PARENT. */
1143 region->next = parent->inner;
1144 parent->inner = region;
1145 }
1146 else
1147 {
1148 /* This is a toplevel region. Add it to the list of toplevel
1149 regions in ROOT_OMP_REGION. */
1150 region->next = root_omp_region;
1151 root_omp_region = region;
1152 }
1153
1154 return region;
1155 }
1156
1157 /* Release the memory associated with the region tree rooted at REGION. */
1158
1159 static void
1160 free_omp_region_1 (struct omp_region *region)
1161 {
1162 struct omp_region *i, *n;
1163
1164 for (i = region->inner; i ; i = n)
1165 {
1166 n = i->next;
1167 free_omp_region_1 (i);
1168 }
1169
1170 free (region);
1171 }
1172
1173 /* Release the memory for the entire omp region tree. */
1174
1175 void
1176 free_omp_regions (void)
1177 {
1178 struct omp_region *r, *n;
1179 for (r = root_omp_region; r ; r = n)
1180 {
1181 n = r->next;
1182 free_omp_region_1 (r);
1183 }
1184 root_omp_region = NULL;
1185 }
1186
1187
1188 /* Create a new context, with OUTER_CTX being the surrounding context. */
1189
1190 static omp_context *
1191 new_omp_context (gimple stmt, omp_context *outer_ctx)
1192 {
1193 omp_context *ctx = XCNEW (omp_context);
1194
1195 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1196 (splay_tree_value) ctx);
1197 ctx->stmt = stmt;
1198
1199 if (outer_ctx)
1200 {
1201 ctx->outer = outer_ctx;
1202 ctx->cb = outer_ctx->cb;
1203 ctx->cb.block = NULL;
1204 ctx->depth = outer_ctx->depth + 1;
1205 }
1206 else
1207 {
1208 ctx->cb.src_fn = current_function_decl;
1209 ctx->cb.dst_fn = current_function_decl;
1210 ctx->cb.src_node = cgraph_get_node (current_function_decl);
1211 gcc_checking_assert (ctx->cb.src_node);
1212 ctx->cb.dst_node = ctx->cb.src_node;
1213 ctx->cb.src_cfun = cfun;
1214 ctx->cb.copy_decl = omp_copy_decl;
1215 ctx->cb.eh_lp_nr = 0;
1216 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1217 ctx->depth = 1;
1218 }
1219
1220 ctx->cb.decl_map = pointer_map_create ();
1221
1222 return ctx;
1223 }
1224
1225 static gimple_seq maybe_catch_exception (gimple_seq);
1226
1227 /* Finalize task copyfn. */
1228
1229 static void
1230 finalize_task_copyfn (gimple task_stmt)
1231 {
1232 struct function *child_cfun;
1233 tree child_fn, old_fn;
1234 gimple_seq seq, new_seq;
1235 gimple bind;
1236
1237 child_fn = gimple_omp_task_copy_fn (task_stmt);
1238 if (child_fn == NULL_TREE)
1239 return;
1240
1241 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1242
1243 /* Inform the callgraph about the new function. */
1244 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1245 = cfun->curr_properties;
1246
1247 old_fn = current_function_decl;
1248 push_cfun (child_cfun);
1249 current_function_decl = child_fn;
1250 bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1251 seq = gimple_seq_alloc ();
1252 gimple_seq_add_stmt (&seq, bind);
1253 new_seq = maybe_catch_exception (seq);
1254 if (new_seq != seq)
1255 {
1256 bind = gimple_build_bind (NULL, new_seq, NULL);
1257 seq = gimple_seq_alloc ();
1258 gimple_seq_add_stmt (&seq, bind);
1259 }
1260 gimple_set_body (child_fn, seq);
1261 pop_cfun ();
1262 current_function_decl = old_fn;
1263
1264 cgraph_add_new_function (child_fn, false);
1265 }
1266
1267 /* Destroy an omp_context data structure. Called through the splay tree
1268 value delete callback. */
1269
1270 static void
1271 delete_omp_context (splay_tree_value value)
1272 {
1273 omp_context *ctx = (omp_context *) value;
1274
1275 pointer_map_destroy (ctx->cb.decl_map);
1276
1277 if (ctx->field_map)
1278 splay_tree_delete (ctx->field_map);
1279 if (ctx->sfield_map)
1280 splay_tree_delete (ctx->sfield_map);
1281
1282 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1283 it produces corrupt debug information. */
1284 if (ctx->record_type)
1285 {
1286 tree t;
1287 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1288 DECL_ABSTRACT_ORIGIN (t) = NULL;
1289 }
1290 if (ctx->srecord_type)
1291 {
1292 tree t;
1293 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1294 DECL_ABSTRACT_ORIGIN (t) = NULL;
1295 }
1296
1297 if (is_task_ctx (ctx))
1298 finalize_task_copyfn (ctx->stmt);
1299
1300 XDELETE (ctx);
1301 }
1302
1303 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1304 context. */
1305
1306 static void
1307 fixup_child_record_type (omp_context *ctx)
1308 {
1309 tree f, type = ctx->record_type;
1310
1311 /* ??? It isn't sufficient to just call remap_type here, because
1312 variably_modified_type_p doesn't work the way we expect for
1313 record types. Testing each field for whether it needs remapping
1314 and creating a new record by hand works, however. */
1315 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1316 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1317 break;
1318 if (f)
1319 {
1320 tree name, new_fields = NULL;
1321
1322 type = lang_hooks.types.make_type (RECORD_TYPE);
1323 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1324 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1325 TYPE_DECL, name, type);
1326 TYPE_NAME (type) = name;
1327
1328 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1329 {
1330 tree new_f = copy_node (f);
1331 DECL_CONTEXT (new_f) = type;
1332 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1333 DECL_CHAIN (new_f) = new_fields;
1334 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1335 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1336 &ctx->cb, NULL);
1337 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1338 &ctx->cb, NULL);
1339 new_fields = new_f;
1340
1341 /* Arrange to be able to look up the receiver field
1342 given the sender field. */
1343 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1344 (splay_tree_value) new_f);
1345 }
1346 TYPE_FIELDS (type) = nreverse (new_fields);
1347 layout_type (type);
1348 }
1349
1350 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1351 }
1352
1353 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1354 specified by CLAUSES. */
1355
1356 static void
1357 scan_sharing_clauses (tree clauses, omp_context *ctx)
1358 {
1359 tree c, decl;
1360 bool scan_array_reductions = false;
1361
1362 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1363 {
1364 bool by_ref;
1365
1366 switch (OMP_CLAUSE_CODE (c))
1367 {
1368 case OMP_CLAUSE_PRIVATE:
1369 decl = OMP_CLAUSE_DECL (c);
1370 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1371 goto do_private;
1372 else if (!is_variable_sized (decl))
1373 install_var_local (decl, ctx);
1374 break;
1375
1376 case OMP_CLAUSE_SHARED:
1377 gcc_assert (is_taskreg_ctx (ctx));
1378 decl = OMP_CLAUSE_DECL (c);
1379 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1380 || !is_variable_sized (decl));
1381 /* Global variables don't need to be copied,
1382 the receiver side will use them directly. */
1383 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1384 break;
1385 by_ref = use_pointer_for_field (decl, ctx);
1386 if (! TREE_READONLY (decl)
1387 || TREE_ADDRESSABLE (decl)
1388 || by_ref
1389 || is_reference (decl))
1390 {
1391 install_var_field (decl, by_ref, 3, ctx);
1392 install_var_local (decl, ctx);
1393 break;
1394 }
1395 /* We don't need to copy const scalar vars back. */
1396 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1397 goto do_private;
1398
1399 case OMP_CLAUSE_LASTPRIVATE:
1400 /* Let the corresponding firstprivate clause create
1401 the variable. */
1402 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1403 break;
1404 /* FALLTHRU */
1405
1406 case OMP_CLAUSE_FIRSTPRIVATE:
1407 case OMP_CLAUSE_REDUCTION:
1408 decl = OMP_CLAUSE_DECL (c);
1409 do_private:
1410 if (is_variable_sized (decl))
1411 {
1412 if (is_task_ctx (ctx))
1413 install_var_field (decl, false, 1, ctx);
1414 break;
1415 }
1416 else if (is_taskreg_ctx (ctx))
1417 {
1418 bool global
1419 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1420 by_ref = use_pointer_for_field (decl, NULL);
1421
1422 if (is_task_ctx (ctx)
1423 && (global || by_ref || is_reference (decl)))
1424 {
1425 install_var_field (decl, false, 1, ctx);
1426 if (!global)
1427 install_var_field (decl, by_ref, 2, ctx);
1428 }
1429 else if (!global)
1430 install_var_field (decl, by_ref, 3, ctx);
1431 }
1432 install_var_local (decl, ctx);
1433 break;
1434
1435 case OMP_CLAUSE_COPYPRIVATE:
1436 case OMP_CLAUSE_COPYIN:
1437 decl = OMP_CLAUSE_DECL (c);
1438 by_ref = use_pointer_for_field (decl, NULL);
1439 install_var_field (decl, by_ref, 3, ctx);
1440 break;
1441
1442 case OMP_CLAUSE_DEFAULT:
1443 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1444 break;
1445
1446 case OMP_CLAUSE_FINAL:
1447 case OMP_CLAUSE_IF:
1448 case OMP_CLAUSE_NUM_THREADS:
1449 case OMP_CLAUSE_SCHEDULE:
1450 if (ctx->outer)
1451 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1452 break;
1453
1454 case OMP_CLAUSE_NOWAIT:
1455 case OMP_CLAUSE_ORDERED:
1456 case OMP_CLAUSE_COLLAPSE:
1457 case OMP_CLAUSE_UNTIED:
1458 case OMP_CLAUSE_MERGEABLE:
1459 break;
1460
1461 default:
1462 gcc_unreachable ();
1463 }
1464 }
1465
1466 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1467 {
1468 switch (OMP_CLAUSE_CODE (c))
1469 {
1470 case OMP_CLAUSE_LASTPRIVATE:
1471 /* Let the corresponding firstprivate clause create
1472 the variable. */
1473 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1474 scan_array_reductions = true;
1475 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1476 break;
1477 /* FALLTHRU */
1478
1479 case OMP_CLAUSE_PRIVATE:
1480 case OMP_CLAUSE_FIRSTPRIVATE:
1481 case OMP_CLAUSE_REDUCTION:
1482 decl = OMP_CLAUSE_DECL (c);
1483 if (is_variable_sized (decl))
1484 install_var_local (decl, ctx);
1485 fixup_remapped_decl (decl, ctx,
1486 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1487 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1488 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1489 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1490 scan_array_reductions = true;
1491 break;
1492
1493 case OMP_CLAUSE_SHARED:
1494 decl = OMP_CLAUSE_DECL (c);
1495 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1496 fixup_remapped_decl (decl, ctx, false);
1497 break;
1498
1499 case OMP_CLAUSE_COPYPRIVATE:
1500 case OMP_CLAUSE_COPYIN:
1501 case OMP_CLAUSE_DEFAULT:
1502 case OMP_CLAUSE_IF:
1503 case OMP_CLAUSE_NUM_THREADS:
1504 case OMP_CLAUSE_SCHEDULE:
1505 case OMP_CLAUSE_NOWAIT:
1506 case OMP_CLAUSE_ORDERED:
1507 case OMP_CLAUSE_COLLAPSE:
1508 case OMP_CLAUSE_UNTIED:
1509 case OMP_CLAUSE_FINAL:
1510 case OMP_CLAUSE_MERGEABLE:
1511 break;
1512
1513 default:
1514 gcc_unreachable ();
1515 }
1516 }
1517
1518 if (scan_array_reductions)
1519 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1520 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1521 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1522 {
1523 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1524 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1525 }
1526 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1527 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1528 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1529 }
1530
1531 /* Create a new name for the omp child function. Returns an identifier. */
1532
1533 static GTY(()) unsigned int tmp_ompfn_id_num;
1534
1535 static tree
1536 create_omp_child_function_name (bool task_copy)
1537 {
1538 return (clone_function_name (current_function_decl,
1539 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1540 }
1541
1542 /* Build a decl for the omp child function. It will not contain a body
1543 yet, just the bare decl. */
1544
1545 static void
1546 create_omp_child_function (omp_context *ctx, bool task_copy)
1547 {
1548 tree decl, type, name, t;
1549
1550 name = create_omp_child_function_name (task_copy);
1551 if (task_copy)
1552 type = build_function_type_list (void_type_node, ptr_type_node,
1553 ptr_type_node, NULL_TREE);
1554 else
1555 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1556
1557 decl = build_decl (gimple_location (ctx->stmt),
1558 FUNCTION_DECL, name, type);
1559
1560 if (!task_copy)
1561 ctx->cb.dst_fn = decl;
1562 else
1563 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1564
1565 TREE_STATIC (decl) = 1;
1566 TREE_USED (decl) = 1;
1567 DECL_ARTIFICIAL (decl) = 1;
1568 DECL_NAMELESS (decl) = 1;
1569 DECL_IGNORED_P (decl) = 0;
1570 TREE_PUBLIC (decl) = 0;
1571 DECL_UNINLINABLE (decl) = 1;
1572 DECL_EXTERNAL (decl) = 0;
1573 DECL_CONTEXT (decl) = NULL_TREE;
1574 DECL_INITIAL (decl) = make_node (BLOCK);
1575
1576 t = build_decl (DECL_SOURCE_LOCATION (decl),
1577 RESULT_DECL, NULL_TREE, void_type_node);
1578 DECL_ARTIFICIAL (t) = 1;
1579 DECL_IGNORED_P (t) = 1;
1580 DECL_CONTEXT (t) = decl;
1581 DECL_RESULT (decl) = t;
1582
1583 t = build_decl (DECL_SOURCE_LOCATION (decl),
1584 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1585 DECL_ARTIFICIAL (t) = 1;
1586 DECL_NAMELESS (t) = 1;
1587 DECL_ARG_TYPE (t) = ptr_type_node;
1588 DECL_CONTEXT (t) = current_function_decl;
1589 TREE_USED (t) = 1;
1590 DECL_ARGUMENTS (decl) = t;
1591 if (!task_copy)
1592 ctx->receiver_decl = t;
1593 else
1594 {
1595 t = build_decl (DECL_SOURCE_LOCATION (decl),
1596 PARM_DECL, get_identifier (".omp_data_o"),
1597 ptr_type_node);
1598 DECL_ARTIFICIAL (t) = 1;
1599 DECL_NAMELESS (t) = 1;
1600 DECL_ARG_TYPE (t) = ptr_type_node;
1601 DECL_CONTEXT (t) = current_function_decl;
1602 TREE_USED (t) = 1;
1603 TREE_ADDRESSABLE (t) = 1;
1604 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1605 DECL_ARGUMENTS (decl) = t;
1606 }
1607
1608 /* Allocate memory for the function structure. The call to
1609 push_struct_function clobbers CFUN, so we need to restore
1610 it afterward with pop_cfun. */
1611 push_struct_function (decl);
1612 cfun->function_end_locus = gimple_location (ctx->stmt);
1613 pop_cfun ();
1614 }
1615
1616
1617 /* Scan an OpenMP parallel directive. */
1618
1619 static void
1620 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1621 {
1622 omp_context *ctx;
1623 tree name;
1624 gimple stmt = gsi_stmt (*gsi);
1625
1626 /* Ignore parallel directives with empty bodies, unless there
1627 are copyin clauses. */
1628 if (optimize > 0
1629 && empty_body_p (gimple_omp_body (stmt))
1630 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1631 OMP_CLAUSE_COPYIN) == NULL)
1632 {
1633 gsi_replace (gsi, gimple_build_nop (), false);
1634 return;
1635 }
1636
1637 ctx = new_omp_context (stmt, outer_ctx);
1638 if (taskreg_nesting_level > 1)
1639 ctx->is_nested = true;
1640 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1641 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1642 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1643 name = create_tmp_var_name (".omp_data_s");
1644 name = build_decl (gimple_location (stmt),
1645 TYPE_DECL, name, ctx->record_type);
1646 DECL_ARTIFICIAL (name) = 1;
1647 DECL_NAMELESS (name) = 1;
1648 TYPE_NAME (ctx->record_type) = name;
1649 create_omp_child_function (ctx, false);
1650 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1651
1652 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1653 scan_omp (gimple_omp_body (stmt), ctx);
1654
1655 if (TYPE_FIELDS (ctx->record_type) == NULL)
1656 ctx->record_type = ctx->receiver_decl = NULL;
1657 else
1658 {
1659 layout_type (ctx->record_type);
1660 fixup_child_record_type (ctx);
1661 }
1662 }
1663
1664 /* Scan an OpenMP task directive. */
1665
1666 static void
1667 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1668 {
1669 omp_context *ctx;
1670 tree name, t;
1671 gimple stmt = gsi_stmt (*gsi);
1672 location_t loc = gimple_location (stmt);
1673
1674 /* Ignore task directives with empty bodies. */
1675 if (optimize > 0
1676 && empty_body_p (gimple_omp_body (stmt)))
1677 {
1678 gsi_replace (gsi, gimple_build_nop (), false);
1679 return;
1680 }
1681
1682 ctx = new_omp_context (stmt, outer_ctx);
1683 if (taskreg_nesting_level > 1)
1684 ctx->is_nested = true;
1685 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1686 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1687 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1688 name = create_tmp_var_name (".omp_data_s");
1689 name = build_decl (gimple_location (stmt),
1690 TYPE_DECL, name, ctx->record_type);
1691 DECL_ARTIFICIAL (name) = 1;
1692 DECL_NAMELESS (name) = 1;
1693 TYPE_NAME (ctx->record_type) = name;
1694 create_omp_child_function (ctx, false);
1695 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1696
1697 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1698
1699 if (ctx->srecord_type)
1700 {
1701 name = create_tmp_var_name (".omp_data_a");
1702 name = build_decl (gimple_location (stmt),
1703 TYPE_DECL, name, ctx->srecord_type);
1704 DECL_ARTIFICIAL (name) = 1;
1705 DECL_NAMELESS (name) = 1;
1706 TYPE_NAME (ctx->srecord_type) = name;
1707 create_omp_child_function (ctx, true);
1708 }
1709
1710 scan_omp (gimple_omp_body (stmt), ctx);
1711
1712 if (TYPE_FIELDS (ctx->record_type) == NULL)
1713 {
1714 ctx->record_type = ctx->receiver_decl = NULL;
1715 t = build_int_cst (long_integer_type_node, 0);
1716 gimple_omp_task_set_arg_size (stmt, t);
1717 t = build_int_cst (long_integer_type_node, 1);
1718 gimple_omp_task_set_arg_align (stmt, t);
1719 }
1720 else
1721 {
1722 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1723 /* Move VLA fields to the end. */
1724 p = &TYPE_FIELDS (ctx->record_type);
1725 while (*p)
1726 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1727 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1728 {
1729 *q = *p;
1730 *p = TREE_CHAIN (*p);
1731 TREE_CHAIN (*q) = NULL_TREE;
1732 q = &TREE_CHAIN (*q);
1733 }
1734 else
1735 p = &DECL_CHAIN (*p);
1736 *p = vla_fields;
1737 layout_type (ctx->record_type);
1738 fixup_child_record_type (ctx);
1739 if (ctx->srecord_type)
1740 layout_type (ctx->srecord_type);
1741 t = fold_convert_loc (loc, long_integer_type_node,
1742 TYPE_SIZE_UNIT (ctx->record_type));
1743 gimple_omp_task_set_arg_size (stmt, t);
1744 t = build_int_cst (long_integer_type_node,
1745 TYPE_ALIGN_UNIT (ctx->record_type));
1746 gimple_omp_task_set_arg_align (stmt, t);
1747 }
1748 }
1749
1750
1751 /* Scan an OpenMP loop directive. */
1752
1753 static void
1754 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1755 {
1756 omp_context *ctx;
1757 size_t i;
1758
1759 ctx = new_omp_context (stmt, outer_ctx);
1760
1761 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1762
1763 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1764 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1765 {
1766 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1767 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1768 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1769 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1770 }
1771 scan_omp (gimple_omp_body (stmt), ctx);
1772 }
1773
1774 /* Scan an OpenMP sections directive. */
1775
1776 static void
1777 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1778 {
1779 omp_context *ctx;
1780
1781 ctx = new_omp_context (stmt, outer_ctx);
1782 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1783 scan_omp (gimple_omp_body (stmt), ctx);
1784 }
1785
1786 /* Scan an OpenMP single directive. */
1787
1788 static void
1789 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1790 {
1791 omp_context *ctx;
1792 tree name;
1793
1794 ctx = new_omp_context (stmt, outer_ctx);
1795 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1796 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1797 name = create_tmp_var_name (".omp_copy_s");
1798 name = build_decl (gimple_location (stmt),
1799 TYPE_DECL, name, ctx->record_type);
1800 TYPE_NAME (ctx->record_type) = name;
1801
1802 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1803 scan_omp (gimple_omp_body (stmt), ctx);
1804
1805 if (TYPE_FIELDS (ctx->record_type) == NULL)
1806 ctx->record_type = NULL;
1807 else
1808 layout_type (ctx->record_type);
1809 }
1810
1811
1812 /* Check OpenMP nesting restrictions. */
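/* For example (an illustrative, non-conforming nesting):

       #pragma omp for
       for (i = 0; i < n; i++)
         {
         #pragma omp single
           f (i);
         }

   the single region is closely nested inside a work-sharing (for) region
   with no intervening parallel, which the checks below diagnose with a
   warning.  */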
1813 static void
1814 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1815 {
1816 switch (gimple_code (stmt))
1817 {
1818 case GIMPLE_OMP_FOR:
1819 case GIMPLE_OMP_SECTIONS:
1820 case GIMPLE_OMP_SINGLE:
1821 case GIMPLE_CALL:
1822 for (; ctx != NULL; ctx = ctx->outer)
1823 switch (gimple_code (ctx->stmt))
1824 {
1825 case GIMPLE_OMP_FOR:
1826 case GIMPLE_OMP_SECTIONS:
1827 case GIMPLE_OMP_SINGLE:
1828 case GIMPLE_OMP_ORDERED:
1829 case GIMPLE_OMP_MASTER:
1830 case GIMPLE_OMP_TASK:
1831 if (is_gimple_call (stmt))
1832 {
1833 warning (0, "barrier region may not be closely nested inside "
1834 "of work-sharing, critical, ordered, master or "
1835 "explicit task region");
1836 return;
1837 }
1838 warning (0, "work-sharing region may not be closely nested inside "
1839 "of work-sharing, critical, ordered, master or explicit "
1840 "task region");
1841 return;
1842 case GIMPLE_OMP_PARALLEL:
1843 return;
1844 default:
1845 break;
1846 }
1847 break;
1848 case GIMPLE_OMP_MASTER:
1849 for (; ctx != NULL; ctx = ctx->outer)
1850 switch (gimple_code (ctx->stmt))
1851 {
1852 case GIMPLE_OMP_FOR:
1853 case GIMPLE_OMP_SECTIONS:
1854 case GIMPLE_OMP_SINGLE:
1855 case GIMPLE_OMP_TASK:
1856 warning (0, "master region may not be closely nested inside "
1857 "of work-sharing or explicit task region");
1858 return;
1859 case GIMPLE_OMP_PARALLEL:
1860 return;
1861 default:
1862 break;
1863 }
1864 break;
1865 case GIMPLE_OMP_ORDERED:
1866 for (; ctx != NULL; ctx = ctx->outer)
1867 switch (gimple_code (ctx->stmt))
1868 {
1869 case GIMPLE_OMP_CRITICAL:
1870 case GIMPLE_OMP_TASK:
1871 warning (0, "ordered region may not be closely nested inside "
1872 "of critical or explicit task region");
1873 return;
1874 case GIMPLE_OMP_FOR:
1875 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1876 OMP_CLAUSE_ORDERED) == NULL)
1877 warning (0, "ordered region must be closely nested inside "
1878 "a loop region with an ordered clause");
1879 return;
1880 case GIMPLE_OMP_PARALLEL:
1881 return;
1882 default:
1883 break;
1884 }
1885 break;
1886 case GIMPLE_OMP_CRITICAL:
1887 for (; ctx != NULL; ctx = ctx->outer)
1888 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1889 && (gimple_omp_critical_name (stmt)
1890 == gimple_omp_critical_name (ctx->stmt)))
1891 {
1892 warning (0, "critical region may not be nested inside a critical "
1893 "region with the same name");
1894 return;
1895 }
1896 break;
1897 default:
1898 break;
1899 }
1900 }
1901
1902
1903 /* Helper function for scan_omp.
1904 
1905 Callback for walk_tree, and for the operand walk in walk_gimple_stmt,
1906 used to scan for OpenMP directives in *TP. */
1907
1908 static tree
1909 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1910 {
1911 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1912 omp_context *ctx = (omp_context *) wi->info;
1913 tree t = *tp;
1914
1915 switch (TREE_CODE (t))
1916 {
1917 case VAR_DECL:
1918 case PARM_DECL:
1919 case LABEL_DECL:
1920 case RESULT_DECL:
1921 if (ctx)
1922 *tp = remap_decl (t, &ctx->cb);
1923 break;
1924
1925 default:
1926 if (ctx && TYPE_P (t))
1927 *tp = remap_type (t, &ctx->cb);
1928 else if (!DECL_P (t))
1929 {
1930 *walk_subtrees = 1;
1931 if (ctx)
1932 {
1933 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1934 if (tem != TREE_TYPE (t))
1935 {
1936 if (TREE_CODE (t) == INTEGER_CST)
1937 *tp = build_int_cst_wide (tem,
1938 TREE_INT_CST_LOW (t),
1939 TREE_INT_CST_HIGH (t));
1940 else
1941 TREE_TYPE (t) = tem;
1942 }
1943 }
1944 }
1945 break;
1946 }
1947
1948 return NULL_TREE;
1949 }
1950
1951
1952 /* Helper function for scan_omp.
1953
1954 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1955 the current statement in GSI. */
1956
1957 static tree
1958 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1959 struct walk_stmt_info *wi)
1960 {
1961 gimple stmt = gsi_stmt (*gsi);
1962 omp_context *ctx = (omp_context *) wi->info;
1963
1964 if (gimple_has_location (stmt))
1965 input_location = gimple_location (stmt);
1966
1967 /* Check the OpenMP nesting restrictions. */
1968 if (ctx != NULL)
1969 {
1970 if (is_gimple_omp (stmt))
1971 check_omp_nesting_restrictions (stmt, ctx);
1972 else if (is_gimple_call (stmt))
1973 {
1974 tree fndecl = gimple_call_fndecl (stmt);
1975 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1976 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1977 check_omp_nesting_restrictions (stmt, ctx);
1978 }
1979 }
1980
1981 *handled_ops_p = true;
1982
1983 switch (gimple_code (stmt))
1984 {
1985 case GIMPLE_OMP_PARALLEL:
1986 taskreg_nesting_level++;
1987 scan_omp_parallel (gsi, ctx);
1988 taskreg_nesting_level--;
1989 break;
1990
1991 case GIMPLE_OMP_TASK:
1992 taskreg_nesting_level++;
1993 scan_omp_task (gsi, ctx);
1994 taskreg_nesting_level--;
1995 break;
1996
1997 case GIMPLE_OMP_FOR:
1998 scan_omp_for (stmt, ctx);
1999 break;
2000
2001 case GIMPLE_OMP_SECTIONS:
2002 scan_omp_sections (stmt, ctx);
2003 break;
2004
2005 case GIMPLE_OMP_SINGLE:
2006 scan_omp_single (stmt, ctx);
2007 break;
2008
2009 case GIMPLE_OMP_SECTION:
2010 case GIMPLE_OMP_MASTER:
2011 case GIMPLE_OMP_ORDERED:
2012 case GIMPLE_OMP_CRITICAL:
2013 ctx = new_omp_context (stmt, ctx);
2014 scan_omp (gimple_omp_body (stmt), ctx);
2015 break;
2016
2017 case GIMPLE_BIND:
2018 {
2019 tree var;
2020
2021 *handled_ops_p = false;
2022 if (ctx)
2023 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2024 insert_decl_map (&ctx->cb, var, var);
2025 }
2026 break;
2027 default:
2028 *handled_ops_p = false;
2029 break;
2030 }
2031
2032 return NULL_TREE;
2033 }
2034
2035
2036 /* Scan all the statements in the sequence BODY. CTX
2037 contains context information about the OpenMP directives and
2038 clauses found during the scan. */
2039
2040 static void
2041 scan_omp (gimple_seq body, omp_context *ctx)
2042 {
2043 location_t saved_location;
2044 struct walk_stmt_info wi;
2045
2046 memset (&wi, 0, sizeof (wi));
2047 wi.info = ctx;
2048 wi.want_locations = true;
2049
2050 saved_location = input_location;
2051 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2052 input_location = saved_location;
2053 }
2054 \f
2055 /* Re-gimplification and code generation routines. */
2056
2057 /* Build a call to GOMP_barrier. */
2058
2059 static tree
2060 build_omp_barrier (void)
2061 {
2062 return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
2063 }
2064
2065 /* If a context was created for STMT when it was scanned, return it. */
2066
2067 static omp_context *
2068 maybe_lookup_ctx (gimple stmt)
2069 {
2070 splay_tree_node n;
2071 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2072 return n ? (omp_context *) n->value : NULL;
2073 }
2074
2075
2076 /* Find the mapping for DECL in CTX or the immediately enclosing
2077 context that has a mapping for DECL.
2078
2079 If CTX is a nested parallel directive, we may have to use the decl
2080 mappings created in CTX's parent context. Suppose that we have the
2081 following parallel nesting (variable UIDs shown for clarity):
2082
2083 iD.1562 = 0;
2084 #omp parallel shared(iD.1562) -> outer parallel
2085 iD.1562 = iD.1562 + 1;
2086
2087 #omp parallel shared (iD.1562) -> inner parallel
2088 iD.1562 = iD.1562 - 1;
2089
2090 Each parallel structure will create a distinct .omp_data_s structure
2091 for copying iD.1562 in/out of the directive:
2092
2093 outer parallel .omp_data_s.1.i -> iD.1562
2094 inner parallel .omp_data_s.2.i -> iD.1562
2095
2096 A shared variable mapping will produce a copy-out operation before
2097 the parallel directive and a copy-in operation after it. So, in
2098 this case we would have:
2099
2100 iD.1562 = 0;
2101 .omp_data_o.1.i = iD.1562;
2102 #omp parallel shared(iD.1562) -> outer parallel
2103 .omp_data_i.1 = &.omp_data_o.1
2104 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2105
2106 .omp_data_o.2.i = iD.1562; -> **
2107 #omp parallel shared(iD.1562) -> inner parallel
2108 .omp_data_i.2 = &.omp_data_o.2
2109 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2110
2111
2112 ** This is a problem. The symbol iD.1562 cannot be referenced
2113 inside the body of the outer parallel region. But since we are
2114 emitting this copy operation while expanding the inner parallel
2115 directive, we need to access the CTX structure of the outer
2116 parallel directive to get the correct mapping:
2117
2118 .omp_data_o.2.i = .omp_data_i.1->i
2119
2120 Since there may be other workshare or parallel directives enclosing
2121 the parallel directive, it may be necessary to walk up the context
2122 parent chain. This is not a problem in general because nested
2123 parallelism happens only rarely. */
2124
2125 static tree
2126 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2127 {
2128 tree t;
2129 omp_context *up;
2130
2131 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2132 t = maybe_lookup_decl (decl, up);
2133
2134 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2135
2136 return t ? t : decl;
2137 }
2138
2139
2140 /* Similar to lookup_decl_in_outer_ctx, but return DECL if no mapping is
2141 found in any enclosing context, and never assert that one must exist. */
2142
2143 static tree
2144 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2145 {
2146 tree t = NULL;
2147 omp_context *up;
2148
2149 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2150 t = maybe_lookup_decl (decl, up);
2151
2152 return t ? t : decl;
2153 }
2154
2155
2156 /* Construct the initialization value for reduction CLAUSE. */
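/* For example, following the cases handled below: '+', '-', '|', '^',
   '||' and '!=' reductions start from 0; '*', '&&' and '==' start from 1;
   '&' starts from ~0 (all bits set); 'max' starts from the smallest value
   of the type (or -INF when infinities are honored) and 'min' from the
   largest (or +INF).  */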
2157
2158 tree
2159 omp_reduction_init (tree clause, tree type)
2160 {
2161 location_t loc = OMP_CLAUSE_LOCATION (clause);
2162 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2163 {
2164 case PLUS_EXPR:
2165 case MINUS_EXPR:
2166 case BIT_IOR_EXPR:
2167 case BIT_XOR_EXPR:
2168 case TRUTH_OR_EXPR:
2169 case TRUTH_ORIF_EXPR:
2170 case TRUTH_XOR_EXPR:
2171 case NE_EXPR:
2172 return build_zero_cst (type);
2173
2174 case MULT_EXPR:
2175 case TRUTH_AND_EXPR:
2176 case TRUTH_ANDIF_EXPR:
2177 case EQ_EXPR:
2178 return fold_convert_loc (loc, type, integer_one_node);
2179
2180 case BIT_AND_EXPR:
2181 return fold_convert_loc (loc, type, integer_minus_one_node);
2182
2183 case MAX_EXPR:
2184 if (SCALAR_FLOAT_TYPE_P (type))
2185 {
2186 REAL_VALUE_TYPE max, min;
2187 if (HONOR_INFINITIES (TYPE_MODE (type)))
2188 {
2189 real_inf (&max);
2190 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2191 }
2192 else
2193 real_maxval (&min, 1, TYPE_MODE (type));
2194 return build_real (type, min);
2195 }
2196 else
2197 {
2198 gcc_assert (INTEGRAL_TYPE_P (type));
2199 return TYPE_MIN_VALUE (type);
2200 }
2201
2202 case MIN_EXPR:
2203 if (SCALAR_FLOAT_TYPE_P (type))
2204 {
2205 REAL_VALUE_TYPE max;
2206 if (HONOR_INFINITIES (TYPE_MODE (type)))
2207 real_inf (&max);
2208 else
2209 real_maxval (&max, 0, TYPE_MODE (type));
2210 return build_real (type, max);
2211 }
2212 else
2213 {
2214 gcc_assert (INTEGRAL_TYPE_P (type));
2215 return TYPE_MAX_VALUE (type);
2216 }
2217
2218 default:
2219 gcc_unreachable ();
2220 }
2221 }
2222
2223 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2224 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2225 private variables. Initialization statements go in ILIST, while calls
2226 to destructors go in DLIST. */
2227
2228 static void
2229 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2230 omp_context *ctx)
2231 {
2232 gimple_stmt_iterator diter;
2233 tree c, dtor, copyin_seq, x, ptr;
2234 bool copyin_by_ref = false;
2235 bool lastprivate_firstprivate = false;
2236 int pass;
2237
2238 *dlist = gimple_seq_alloc ();
2239 diter = gsi_start (*dlist);
2240 copyin_seq = NULL;
2241
2242 /* Do all the fixed sized types in the first pass, and the variable sized
2243 types in the second pass. This makes sure that the scalar arguments to
2244 the variable sized types are processed before we use them in the
2245 variable sized operations. */
2246 for (pass = 0; pass < 2; ++pass)
2247 {
2248 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2249 {
2250 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2251 tree var, new_var;
2252 bool by_ref;
2253 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2254
2255 switch (c_kind)
2256 {
2257 case OMP_CLAUSE_PRIVATE:
2258 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2259 continue;
2260 break;
2261 case OMP_CLAUSE_SHARED:
2262 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2263 {
2264 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2265 continue;
2266 }
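/* FALLTHRU */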
2267 case OMP_CLAUSE_FIRSTPRIVATE:
2268 case OMP_CLAUSE_COPYIN:
2269 case OMP_CLAUSE_REDUCTION:
2270 break;
2271 case OMP_CLAUSE_LASTPRIVATE:
2272 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2273 {
2274 lastprivate_firstprivate = true;
2275 if (pass != 0)
2276 continue;
2277 }
2278 break;
2279 default:
2280 continue;
2281 }
2282
2283 new_var = var = OMP_CLAUSE_DECL (c);
2284 if (c_kind != OMP_CLAUSE_COPYIN)
2285 new_var = lookup_decl (var, ctx);
2286
2287 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2288 {
2289 if (pass != 0)
2290 continue;
2291 }
2292 else if (is_variable_sized (var))
2293 {
2294 /* For variable sized types, we need to allocate the
2295 actual storage here. Call alloca and store the
2296 result in the pointer decl that we created elsewhere. */
2297 if (pass == 0)
2298 continue;
2299
2300 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2301 {
2302 gimple stmt;
2303 tree tmp, atmp;
2304
2305 ptr = DECL_VALUE_EXPR (new_var);
2306 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2307 ptr = TREE_OPERAND (ptr, 0);
2308 gcc_assert (DECL_P (ptr));
2309 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2310
2311 /* void *tmp = __builtin_alloca */
2312 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2313 stmt = gimple_build_call (atmp, 1, x);
2314 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2315 gimple_add_tmp_var (tmp);
2316 gimple_call_set_lhs (stmt, tmp);
2317
2318 gimple_seq_add_stmt (ilist, stmt);
2319
2320 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2321 gimplify_assign (ptr, x, ilist);
2322 }
2323 }
2324 else if (is_reference (var))
2325 {
2326 /* For references that are being privatized for Fortran,
2327 allocate new backing storage for the new pointer
2328 variable. This allows us to avoid changing all the
2329 code that expects a pointer to something that expects
2330 a direct variable. Note that this doesn't apply to
2331 C++, since reference types are disallowed in data
2332 sharing clauses there, except for NRV optimized
2333 return values. */
2334 if (pass == 0)
2335 continue;
2336
2337 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2338 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2339 {
2340 x = build_receiver_ref (var, false, ctx);
2341 x = build_fold_addr_expr_loc (clause_loc, x);
2342 }
2343 else if (TREE_CONSTANT (x))
2344 {
2345 const char *name = NULL;
2346 if (DECL_NAME (var))
2347 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2348
2349 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2350 name);
2351 gimple_add_tmp_var (x);
2352 TREE_ADDRESSABLE (x) = 1;
2353 x = build_fold_addr_expr_loc (clause_loc, x);
2354 }
2355 else
2356 {
2357 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2358 x = build_call_expr_loc (clause_loc, atmp, 1, x);
2359 }
2360
2361 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2362 gimplify_assign (new_var, x, ilist);
2363
2364 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2365 }
2366 else if (c_kind == OMP_CLAUSE_REDUCTION
2367 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2368 {
2369 if (pass == 0)
2370 continue;
2371 }
2372 else if (pass != 0)
2373 continue;
2374
2375 switch (OMP_CLAUSE_CODE (c))
2376 {
2377 case OMP_CLAUSE_SHARED:
2378 /* Shared global vars are just accessed directly. */
2379 if (is_global_var (new_var))
2380 break;
2381 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2382 needs to be delayed until after fixup_child_record_type so
2383 that we get the correct type during the dereference. */
2384 by_ref = use_pointer_for_field (var, ctx);
2385 x = build_receiver_ref (var, by_ref, ctx);
2386 SET_DECL_VALUE_EXPR (new_var, x);
2387 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2388
2389 /* ??? If VAR is not passed by reference, and the variable
2390 hasn't been initialized yet, then we'll get a warning for
2391 the store into the omp_data_s structure. Ideally, we'd be
2392 able to notice this and not store anything at all, but
2393 we're generating code too early. Suppress the warning. */
2394 if (!by_ref)
2395 TREE_NO_WARNING (var) = 1;
2396 break;
2397
2398 case OMP_CLAUSE_LASTPRIVATE:
2399 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2400 break;
2401 /* FALLTHRU */
2402
2403 case OMP_CLAUSE_PRIVATE:
2404 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2405 x = build_outer_var_ref (var, ctx);
2406 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2407 {
2408 if (is_task_ctx (ctx))
2409 x = build_receiver_ref (var, false, ctx);
2410 else
2411 x = build_outer_var_ref (var, ctx);
2412 }
2413 else
2414 x = NULL;
2415 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2416 if (x)
2417 gimplify_and_add (x, ilist);
2418 /* FALLTHRU */
2419
2420 do_dtor:
2421 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2422 if (x)
2423 {
2424 gimple_seq tseq = NULL;
2425
2426 dtor = x;
2427 gimplify_stmt (&dtor, &tseq);
2428 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2429 }
2430 break;
2431
2432 case OMP_CLAUSE_FIRSTPRIVATE:
2433 if (is_task_ctx (ctx))
2434 {
2435 if (is_reference (var) || is_variable_sized (var))
2436 goto do_dtor;
2437 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2438 ctx))
2439 || use_pointer_for_field (var, NULL))
2440 {
2441 x = build_receiver_ref (var, false, ctx);
2442 SET_DECL_VALUE_EXPR (new_var, x);
2443 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2444 goto do_dtor;
2445 }
2446 }
2447 x = build_outer_var_ref (var, ctx);
2448 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2449 gimplify_and_add (x, ilist);
2450 goto do_dtor;
2451 break;
2452
2453 case OMP_CLAUSE_COPYIN:
2454 by_ref = use_pointer_for_field (var, NULL);
2455 x = build_receiver_ref (var, by_ref, ctx);
2456 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2457 append_to_statement_list (x, &copyin_seq);
2458 copyin_by_ref |= by_ref;
2459 break;
2460
2461 case OMP_CLAUSE_REDUCTION:
2462 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2463 {
2464 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2465 x = build_outer_var_ref (var, ctx);
2466
2467 if (is_reference (var))
2468 x = build_fold_addr_expr_loc (clause_loc, x);
2469 SET_DECL_VALUE_EXPR (placeholder, x);
2470 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2471 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2472 gimple_seq_add_seq (ilist,
2473 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2474 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2475 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2476 }
2477 else
2478 {
2479 x = omp_reduction_init (c, TREE_TYPE (new_var));
2480 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2481 gimplify_assign (new_var, x, ilist);
2482 }
2483 break;
2484
2485 default:
2486 gcc_unreachable ();
2487 }
2488 }
2489 }
2490
2491 /* The copyin sequence is not to be executed by the main thread, since
2492 that would result in self-copies. Such a self-copy may be invisible for
2493 scalars, but it certainly is not for C++ operator=. */
2494 if (copyin_seq)
2495 {
2496 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2497 0);
2498 x = build2 (NE_EXPR, boolean_type_node, x,
2499 build_int_cst (TREE_TYPE (x), 0));
2500 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2501 gimplify_and_add (x, ilist);
2502 }
2503
2504 /* If any copyin variable is passed by reference, we must ensure the
2505 master thread doesn't modify it before it is copied over in all
2506 threads. Similarly for variables in both firstprivate and
2507 lastprivate clauses we need to ensure the lastprivate copying
2508 happens after firstprivate copying in all threads. */
2509 if (copyin_by_ref || lastprivate_firstprivate)
2510 gimplify_and_add (build_omp_barrier (), ilist);
2511 }
2512
2513
2514 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2515 both parallel and workshare constructs. PREDICATE may be NULL if it's
2516 always true. */
2517
2518 static void
2519 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2520 omp_context *ctx)
2521 {
2522 tree x, c, label = NULL;
2523 bool par_clauses = false;
2524
2525 /* Early exit if there are no lastprivate clauses. */
2526 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2527 if (clauses == NULL)
2528 {
2529 /* If this was a workshare construct, see if it had been combined
2530 with its parallel. In that case, look for the clauses on the
2531 parallel statement itself. */
2532 if (is_parallel_ctx (ctx))
2533 return;
2534
2535 ctx = ctx->outer;
2536 if (ctx == NULL || !is_parallel_ctx (ctx))
2537 return;
2538
2539 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2540 OMP_CLAUSE_LASTPRIVATE);
2541 if (clauses == NULL)
2542 return;
2543 par_clauses = true;
2544 }
2545
2546 if (predicate)
2547 {
2548 gimple stmt;
2549 tree label_true, arm1, arm2;
2550
2551 label = create_artificial_label (UNKNOWN_LOCATION);
2552 label_true = create_artificial_label (UNKNOWN_LOCATION);
2553 arm1 = TREE_OPERAND (predicate, 0);
2554 arm2 = TREE_OPERAND (predicate, 1);
2555 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2556 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2557 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2558 label_true, label);
2559 gimple_seq_add_stmt (stmt_list, stmt);
2560 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2561 }
2562
2563 for (c = clauses; c ;)
2564 {
2565 tree var, new_var;
2566 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2567
2568 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2569 {
2570 var = OMP_CLAUSE_DECL (c);
2571 new_var = lookup_decl (var, ctx);
2572
2573 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2574 {
2575 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2576 gimple_seq_add_seq (stmt_list,
2577 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2578 }
2579 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2580
2581 x = build_outer_var_ref (var, ctx);
2582 if (is_reference (var))
2583 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2584 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2585 gimplify_and_add (x, stmt_list);
2586 }
2587 c = OMP_CLAUSE_CHAIN (c);
2588 if (c == NULL && !par_clauses)
2589 {
2590 /* If this was a workshare construct, see if it had been combined
2591 with its parallel. In that case, continue looking for the
2592 clauses also on the parallel statement itself. */
2593 if (is_parallel_ctx (ctx))
2594 break;
2595
2596 ctx = ctx->outer;
2597 if (ctx == NULL || !is_parallel_ctx (ctx))
2598 break;
2599
2600 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2601 OMP_CLAUSE_LASTPRIVATE);
2602 par_clauses = true;
2603 }
2604 }
2605
2606 if (label)
2607 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2608 }
2609
2610
2611 /* Generate code to implement the REDUCTION clauses. */
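/* As a rough sketch of the strategy used below: for a single scalar
   reduction such as reduction(+:sum), each thread's partial result is
   merged with an atomic update, conceptually

     #pragma omp atomic
     sum += sum_private;

   whereas with more than one reduction clause (or an array/placeholder
   reduction) the merge statements are bracketed by calls to
   GOMP_atomic_start () and GOMP_atomic_end ().  */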
2612
2613 static void
2614 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2615 {
2616 gimple_seq sub_seq = NULL;
2617 gimple stmt;
2618 tree x, c;
2619 int count = 0;
2620
2621 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2622 update in that case, otherwise use a lock. */
2623 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2624 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2625 {
2626 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2627 {
2628 /* Never use OMP_ATOMIC for array reductions. */
2629 count = -1;
2630 break;
2631 }
2632 count++;
2633 }
2634
2635 if (count == 0)
2636 return;
2637
2638 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2639 {
2640 tree var, ref, new_var;
2641 enum tree_code code;
2642 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2643
2644 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2645 continue;
2646
2647 var = OMP_CLAUSE_DECL (c);
2648 new_var = lookup_decl (var, ctx);
2649 if (is_reference (var))
2650 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2651 ref = build_outer_var_ref (var, ctx);
2652 code = OMP_CLAUSE_REDUCTION_CODE (c);
2653
2654 /* reduction(-:var) sums up the partial results, so it acts
2655 identically to reduction(+:var). */
2656 if (code == MINUS_EXPR)
2657 code = PLUS_EXPR;
2658
2659 if (count == 1)
2660 {
2661 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2662
2663 addr = save_expr (addr);
2664 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2665 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2666 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2667 gimplify_and_add (x, stmt_seqp);
2668 return;
2669 }
2670
2671 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2672 {
2673 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2674
2675 if (is_reference (var))
2676 ref = build_fold_addr_expr_loc (clause_loc, ref);
2677 SET_DECL_VALUE_EXPR (placeholder, ref);
2678 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2679 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2680 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2681 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2682 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2683 }
2684 else
2685 {
2686 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2687 ref = build_outer_var_ref (var, ctx);
2688 gimplify_assign (ref, x, &sub_seq);
2689 }
2690 }
2691
2692 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
2693 0);
2694 gimple_seq_add_stmt (stmt_seqp, stmt);
2695
2696 gimple_seq_add_seq (stmt_seqp, sub_seq);
2697
2698 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
2699 0);
2700 gimple_seq_add_stmt (stmt_seqp, stmt);
2701 }
2702
2703
2704 /* Generate code to implement the COPYPRIVATE clauses. */
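/* Roughly, for

     #pragma omp single copyprivate(x)
       x = 42;

   the thread that executed the single region stores X (or its address,
   when passed by reference) into the .omp_copy_s record via *SLIST, and
   the other threads copy the value back out of that record via *RLIST.  */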
2705
2706 static void
2707 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2708 omp_context *ctx)
2709 {
2710 tree c;
2711
2712 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2713 {
2714 tree var, new_var, ref, x;
2715 bool by_ref;
2716 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2717
2718 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2719 continue;
2720
2721 var = OMP_CLAUSE_DECL (c);
2722 by_ref = use_pointer_for_field (var, NULL);
2723
2724 ref = build_sender_ref (var, ctx);
2725 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2726 if (by_ref)
2727 {
2728 x = build_fold_addr_expr_loc (clause_loc, new_var);
2729 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2730 }
2731 gimplify_assign (ref, x, slist);
2732
2733 ref = build_receiver_ref (var, false, ctx);
2734 if (by_ref)
2735 {
2736 ref = fold_convert_loc (clause_loc,
2737 build_pointer_type (TREE_TYPE (new_var)),
2738 ref);
2739 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2740 }
2741 if (is_reference (var))
2742 {
2743 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2744 ref = build_simple_mem_ref_loc (clause_loc, ref);
2745 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2746 }
2747 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2748 gimplify_and_add (x, rlist);
2749 }
2750 }
2751
2752
2753 /* Generate code to implement the FIRSTPRIVATE, COPYIN, LASTPRIVATE and
2754 REDUCTION clauses from the sender (aka parent) side. */
2755
2756 static void
2757 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2758 omp_context *ctx)
2759 {
2760 tree c;
2761
2762 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2763 {
2764 tree val, ref, x, var;
2765 bool by_ref, do_in = false, do_out = false;
2766 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2767
2768 switch (OMP_CLAUSE_CODE (c))
2769 {
2770 case OMP_CLAUSE_PRIVATE:
2771 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2772 break;
2773 continue;
2774 case OMP_CLAUSE_FIRSTPRIVATE:
2775 case OMP_CLAUSE_COPYIN:
2776 case OMP_CLAUSE_LASTPRIVATE:
2777 case OMP_CLAUSE_REDUCTION:
2778 break;
2779 default:
2780 continue;
2781 }
2782
2783 val = OMP_CLAUSE_DECL (c);
2784 var = lookup_decl_in_outer_ctx (val, ctx);
2785
2786 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2787 && is_global_var (var))
2788 continue;
2789 if (is_variable_sized (val))
2790 continue;
2791 by_ref = use_pointer_for_field (val, NULL);
2792
2793 switch (OMP_CLAUSE_CODE (c))
2794 {
2795 case OMP_CLAUSE_PRIVATE:
2796 case OMP_CLAUSE_FIRSTPRIVATE:
2797 case OMP_CLAUSE_COPYIN:
2798 do_in = true;
2799 break;
2800
2801 case OMP_CLAUSE_LASTPRIVATE:
2802 if (by_ref || is_reference (val))
2803 {
2804 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2805 continue;
2806 do_in = true;
2807 }
2808 else
2809 {
2810 do_out = true;
2811 if (lang_hooks.decls.omp_private_outer_ref (val))
2812 do_in = true;
2813 }
2814 break;
2815
2816 case OMP_CLAUSE_REDUCTION:
2817 do_in = true;
2818 do_out = !(by_ref || is_reference (val));
2819 break;
2820
2821 default:
2822 gcc_unreachable ();
2823 }
2824
2825 if (do_in)
2826 {
2827 ref = build_sender_ref (val, ctx);
2828 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2829 gimplify_assign (ref, x, ilist);
2830 if (is_task_ctx (ctx))
2831 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2832 }
2833
2834 if (do_out)
2835 {
2836 ref = build_sender_ref (val, ctx);
2837 gimplify_assign (var, ref, olist);
2838 }
2839 }
2840 }
2841
2842 /* Generate code to implement SHARED from the sender (aka parent)
2843 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2844 list things that got automatically shared. */
2845
2846 static void
2847 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2848 {
2849 tree var, ovar, nvar, f, x, record_type;
2850
2851 if (ctx->record_type == NULL)
2852 return;
2853
2854 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2855 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2856 {
2857 ovar = DECL_ABSTRACT_ORIGIN (f);
2858 nvar = maybe_lookup_decl (ovar, ctx);
2859 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2860 continue;
2861
2862 /* If CTX is a nested parallel directive, find the immediately
2863 enclosing parallel or workshare construct that contains a
2864 mapping for OVAR. */
2865 var = lookup_decl_in_outer_ctx (ovar, ctx);
2866
2867 if (use_pointer_for_field (ovar, ctx))
2868 {
2869 x = build_sender_ref (ovar, ctx);
2870 var = build_fold_addr_expr (var);
2871 gimplify_assign (x, var, ilist);
2872 }
2873 else
2874 {
2875 x = build_sender_ref (ovar, ctx);
2876 gimplify_assign (x, var, ilist);
2877
2878 if (!TREE_READONLY (var)
2879 /* We don't need to receive a new reference to a result
2880 or parm decl. In fact we may not store to it as we will
2881 invalidate any pending RSO and generate wrong gimple
2882 during inlining. */
2883 && !((TREE_CODE (var) == RESULT_DECL
2884 || TREE_CODE (var) == PARM_DECL)
2885 && DECL_BY_REFERENCE (var)))
2886 {
2887 x = build_sender_ref (ovar, ctx);
2888 gimplify_assign (var, x, olist);
2889 }
2890 }
2891 }
2892 }
2893
2894
2895 /* A convenience function to build an empty GIMPLE_COND with just the
2896 condition. */
2897
2898 static gimple
2899 gimple_build_cond_empty (tree cond)
2900 {
2901 enum tree_code pred_code;
2902 tree lhs, rhs;
2903
2904 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2905 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2906 }
2907
2908
2909 /* Build the function calls to GOMP_parallel_start etc. to actually
2910 generate the parallel operation. REGION is the parallel region
2911 being expanded. BB is the block where the code should be inserted.
2912 WS_ARGS will be set if this is a call to a combined parallel+workshare
2913 construct; it contains the list of additional arguments needed by
2914 the workshare construct. */
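/* As a sketch, for a plain (non-combined) parallel region the emitted
   sequence is roughly

     GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
     child_fn (&.omp_data_o);    <- the encountering thread joins the team
     GOMP_parallel_end ();

   while combined parallel+workshare regions call the matching
   GOMP_parallel_loop_*_start or GOMP_parallel_sections_start builtin and
   append WS_ARGS to the argument list.  */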
2915
2916 static void
2917 expand_parallel_call (struct omp_region *region, basic_block bb,
2918 gimple entry_stmt, VEC(tree,gc) *ws_args)
2919 {
2920 tree t, t1, t2, val, cond, c, clauses;
2921 gimple_stmt_iterator gsi;
2922 gimple stmt;
2923 enum built_in_function start_ix;
2924 int start_ix2;
2925 location_t clause_loc;
2926 VEC(tree,gc) *args;
2927
2928 clauses = gimple_omp_parallel_clauses (entry_stmt);
2929
2930 /* Determine what flavor of GOMP_parallel_start we will be
2931 emitting. */
2932 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2933 if (is_combined_parallel (region))
2934 {
2935 switch (region->inner->type)
2936 {
2937 case GIMPLE_OMP_FOR:
2938 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2939 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2940 + (region->inner->sched_kind
2941 == OMP_CLAUSE_SCHEDULE_RUNTIME
2942 ? 3 : region->inner->sched_kind));
2943 start_ix = (enum built_in_function)start_ix2;
2944 break;
2945 case GIMPLE_OMP_SECTIONS:
2946 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2947 break;
2948 default:
2949 gcc_unreachable ();
2950 }
2951 }
2952
2953 /* By default, the value of NUM_THREADS is zero (selected at run time)
2954 and there is no conditional. */
2955 cond = NULL_TREE;
2956 val = build_int_cst (unsigned_type_node, 0);
2957
2958 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2959 if (c)
2960 cond = OMP_CLAUSE_IF_EXPR (c);
2961
2962 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2963 if (c)
2964 {
2965 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2966 clause_loc = OMP_CLAUSE_LOCATION (c);
2967 }
2968 else
2969 clause_loc = gimple_location (entry_stmt);
2970
2971 /* Ensure 'val' is of the correct type. */
2972 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2973
2974 /* If we found the clause 'if (cond)', build either
2975 (cond != 0) or (cond ? val : 1u). */
2976 if (cond)
2977 {
2978 gimple_stmt_iterator gsi;
2979
2980 cond = gimple_boolify (cond);
2981
2982 if (integer_zerop (val))
2983 val = fold_build2_loc (clause_loc,
2984 EQ_EXPR, unsigned_type_node, cond,
2985 build_int_cst (TREE_TYPE (cond), 0));
2986 else
2987 {
2988 basic_block cond_bb, then_bb, else_bb;
2989 edge e, e_then, e_else;
2990 tree tmp_then, tmp_else, tmp_join, tmp_var;
2991
2992 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2993 if (gimple_in_ssa_p (cfun))
2994 {
2995 tmp_then = make_ssa_name (tmp_var, NULL);
2996 tmp_else = make_ssa_name (tmp_var, NULL);
2997 tmp_join = make_ssa_name (tmp_var, NULL);
2998 }
2999 else
3000 {
3001 tmp_then = tmp_var;
3002 tmp_else = tmp_var;
3003 tmp_join = tmp_var;
3004 }
3005
3006 e = split_block (bb, NULL);
3007 cond_bb = e->src;
3008 bb = e->dest;
3009 remove_edge (e);
3010
3011 then_bb = create_empty_bb (cond_bb);
3012 else_bb = create_empty_bb (then_bb);
3013 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3014 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3015
3016 stmt = gimple_build_cond_empty (cond);
3017 gsi = gsi_start_bb (cond_bb);
3018 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3019
3020 gsi = gsi_start_bb (then_bb);
3021 stmt = gimple_build_assign (tmp_then, val);
3022 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3023
3024 gsi = gsi_start_bb (else_bb);
3025 stmt = gimple_build_assign
3026 (tmp_else, build_int_cst (unsigned_type_node, 1));
3027 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3028
3029 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3030 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3031 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3032 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3033
3034 if (gimple_in_ssa_p (cfun))
3035 {
3036 gimple phi = create_phi_node (tmp_join, bb);
3037 SSA_NAME_DEF_STMT (tmp_join) = phi;
3038 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3039 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3040 }
3041
3042 val = tmp_join;
3043 }
3044
3045 gsi = gsi_start_bb (bb);
3046 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3047 false, GSI_CONTINUE_LINKING);
3048 }
3049
3050 gsi = gsi_last_bb (bb);
3051 t = gimple_omp_parallel_data_arg (entry_stmt);
3052 if (t == NULL)
3053 t1 = null_pointer_node;
3054 else
3055 t1 = build_fold_addr_expr (t);
3056 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3057
3058 args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
3059 VEC_quick_push (tree, args, t2);
3060 VEC_quick_push (tree, args, t1);
3061 VEC_quick_push (tree, args, val);
3062 VEC_splice (tree, args, ws_args);
3063
3064 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3065 builtin_decl_explicit (start_ix), args);
3066
3067 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3068 false, GSI_CONTINUE_LINKING);
3069
3070 t = gimple_omp_parallel_data_arg (entry_stmt);
3071 if (t == NULL)
3072 t = null_pointer_node;
3073 else
3074 t = build_fold_addr_expr (t);
3075 t = build_call_expr_loc (gimple_location (entry_stmt),
3076 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3077 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3078 false, GSI_CONTINUE_LINKING);
3079
3080 t = build_call_expr_loc (gimple_location (entry_stmt),
3081 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3082 0);
3083 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3084 false, GSI_CONTINUE_LINKING);
3085 }
3086
3087
3088 /* Build the function call to GOMP_task to actually
3089 generate the task operation. BB is the block where the code should be inserted. */
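/* Roughly, the call built below has the shape

     GOMP_task (child_fn, &.omp_data_o, copy_fn, arg_size, arg_align,
                if_cond, flags);

   where IF_COND defaults to true when there is no 'if' clause and, as
   encoded below, bit 0 of FLAGS is set for 'untied', bit 1 for 'final'
   and bit 2 for 'mergeable'.  */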
3090
3091 static void
3092 expand_task_call (basic_block bb, gimple entry_stmt)
3093 {
3094 tree t, t1, t2, t3, flags, cond, c, c2, clauses;
3095 gimple_stmt_iterator gsi;
3096 location_t loc = gimple_location (entry_stmt);
3097
3098 clauses = gimple_omp_task_clauses (entry_stmt);
3099
3100 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3101 if (c)
3102 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3103 else
3104 cond = boolean_true_node;
3105
3106 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3107 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3108 flags = build_int_cst (unsigned_type_node,
3109 (c ? 1 : 0) + (c2 ? 4 : 0));
3110
3111 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3112 if (c)
3113 {
3114 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3115 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3116 build_int_cst (unsigned_type_node, 2),
3117 build_int_cst (unsigned_type_node, 0));
3118 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3119 }
3120
3121 gsi = gsi_last_bb (bb);
3122 t = gimple_omp_task_data_arg (entry_stmt);
3123 if (t == NULL)
3124 t2 = null_pointer_node;
3125 else
3126 t2 = build_fold_addr_expr_loc (loc, t);
3127 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3128 t = gimple_omp_task_copy_fn (entry_stmt);
3129 if (t == NULL)
3130 t3 = null_pointer_node;
3131 else
3132 t3 = build_fold_addr_expr_loc (loc, t);
3133
3134 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3135 7, t1, t2, t3,
3136 gimple_omp_task_arg_size (entry_stmt),
3137 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3138
3139 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3140 false, GSI_CONTINUE_LINKING);
3141 }
3142
3143
3144 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3145 catch handler and return it. This prevents programs from violating the
3146 structured block semantics with throws. */
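/* Conceptually the result is

     try { BODY } catch { <must-not-throw handler> }

   where the handler is the language's protect-cleanup action (typically
   std::terminate for C++) or, failing that, __builtin_trap, so an
   exception escaping the structured block aborts instead of unwinding
   out of the OpenMP region.  */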
3147
3148 static gimple_seq
3149 maybe_catch_exception (gimple_seq body)
3150 {
3151 gimple g;
3152 tree decl;
3153
3154 if (!flag_exceptions)
3155 return body;
3156
3157 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3158 decl = lang_hooks.eh_protect_cleanup_actions ();
3159 else
3160 decl = builtin_decl_explicit (BUILT_IN_TRAP);
3161
3162 g = gimple_build_eh_must_not_throw (decl);
3163 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3164 GIMPLE_TRY_CATCH);
3165
3166 return gimple_seq_alloc_with_stmt (g);
3167 }
3168
3169 /* Chain all the DECLs in the vector V by their DECL_CHAIN fields. */
3170
3171 static tree
3172 vec2chain (VEC(tree,gc) *v)
3173 {
3174 tree chain = NULL_TREE, t;
3175 unsigned ix;
3176
3177 FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
3178 {
3179 DECL_CHAIN (t) = chain;
3180 chain = t;
3181 }
3182
3183 return chain;
3184 }
3185
3186
3187 /* Remove barriers in REGION->EXIT's block. Note that this is only
3188 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3189 is an implicit barrier, a barrier left at the end of the GIMPLE_OMP_PARALLEL
3190 region by a work-sharing construct inside it can now be
3191 removed. */
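/* For instance, in

     #pragma omp parallel
     {
       #pragma omp for
       for (i = 0; i < n; i++)
         a[i] = i;
     }

   the barrier implied at the end of the work-sharing loop is immediately
   followed by the parallel region's own implicit barrier, so the first
   one can usually be dropped; the task-related caveat handled below is the
   main exception.  */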
3192
3193 static void
3194 remove_exit_barrier (struct omp_region *region)
3195 {
3196 gimple_stmt_iterator gsi;
3197 basic_block exit_bb;
3198 edge_iterator ei;
3199 edge e;
3200 gimple stmt;
3201 int any_addressable_vars = -1;
3202
3203 exit_bb = region->exit;
3204
3205 /* If the parallel region doesn't return, we don't have REGION->EXIT
3206 block at all. */
3207 if (! exit_bb)
3208 return;
3209
3210 /* The last statement in the block will be the parallel's GIMPLE_OMP_RETURN. The
3211 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3212 statements that can appear in between are extremely limited -- no
3213 memory operations at all. Here, we allow nothing at all, so the
3214 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3215 gsi = gsi_last_bb (exit_bb);
3216 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3217 gsi_prev (&gsi);
3218 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3219 return;
3220
3221 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3222 {
3223 gsi = gsi_last_bb (e->src);
3224 if (gsi_end_p (gsi))
3225 continue;
3226 stmt = gsi_stmt (gsi);
3227 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3228 && !gimple_omp_return_nowait_p (stmt))
3229 {
3230 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3231 in many cases. If there could be tasks queued, the barrier
3232 might be needed to let the tasks run before some local
3233 variable of the parallel that the task uses as shared
3234 runs out of scope. The task can be spawned either
3235 from within current function (this would be easy to check)
3236 or from some function it calls and gets passed an address
3237 of such a variable. */
3238 if (any_addressable_vars < 0)
3239 {
3240 gimple parallel_stmt = last_stmt (region->entry);
3241 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3242 tree local_decls, block, decl;
3243 unsigned ix;
3244
3245 any_addressable_vars = 0;
3246 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3247 if (TREE_ADDRESSABLE (decl))
3248 {
3249 any_addressable_vars = 1;
3250 break;
3251 }
3252 for (block = gimple_block (stmt);
3253 !any_addressable_vars
3254 && block
3255 && TREE_CODE (block) == BLOCK;
3256 block = BLOCK_SUPERCONTEXT (block))
3257 {
3258 for (local_decls = BLOCK_VARS (block);
3259 local_decls;
3260 local_decls = DECL_CHAIN (local_decls))
3261 if (TREE_ADDRESSABLE (local_decls))
3262 {
3263 any_addressable_vars = 1;
3264 break;
3265 }
3266 if (block == gimple_block (parallel_stmt))
3267 break;
3268 }
3269 }
3270 if (!any_addressable_vars)
3271 gimple_omp_return_set_nowait (stmt);
3272 }
3273 }
3274 }
3275
3276 static void
3277 remove_exit_barriers (struct omp_region *region)
3278 {
3279 if (region->type == GIMPLE_OMP_PARALLEL)
3280 remove_exit_barrier (region);
3281
3282 if (region->inner)
3283 {
3284 region = region->inner;
3285 remove_exit_barriers (region);
3286 while (region->next)
3287 {
3288 region = region->next;
3289 remove_exit_barriers (region);
3290 }
3291 }
3292 }
3293
3294 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3295 calls. These can't be declared as const functions, but
3296 within one parallel body they are constant, so they can be
3297 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3298 which are declared const. Similarly for a task body, except
3299 that in an untied task omp_get_thread_num () can change at any task
3300 scheduling point. */
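/* For example, inside a parallel body

     nthreads = omp_get_num_threads ();

   can be redirected to the const __builtin_omp_get_num_threads (), which
   lets later passes CSE repeated queries within the region.  */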
3301
3302 static void
3303 optimize_omp_library_calls (gimple entry_stmt)
3304 {
3305 basic_block bb;
3306 gimple_stmt_iterator gsi;
3307 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3308 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3309 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3310 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
3311 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3312 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3313 OMP_CLAUSE_UNTIED) != NULL);
3314
3315 FOR_EACH_BB (bb)
3316 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3317 {
3318 gimple call = gsi_stmt (gsi);
3319 tree decl;
3320
3321 if (is_gimple_call (call)
3322 && (decl = gimple_call_fndecl (call))
3323 && DECL_EXTERNAL (decl)
3324 && TREE_PUBLIC (decl)
3325 && DECL_INITIAL (decl) == NULL)
3326 {
3327 tree built_in;
3328
3329 if (DECL_NAME (decl) == thr_num_id)
3330 {
3331 /* In an untied task (#pragma omp task untied), omp_get_thread_num ()
3332 can change during the execution of the task region. */
3333 if (untied_task)
3334 continue;
3335 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3336 }
3337 else if (DECL_NAME (decl) == num_thr_id)
3338 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3339 else
3340 continue;
3341
3342 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3343 || gimple_call_num_args (call) != 0)
3344 continue;
3345
3346 if (flag_exceptions && !TREE_NOTHROW (decl))
3347 continue;
3348
3349 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3350 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3351 TREE_TYPE (TREE_TYPE (built_in))))
3352 continue;
3353
3354 gimple_call_set_fndecl (call, built_in);
3355 }
3356 }
3357 }
3358
3359 /* Expand the OpenMP parallel or task directive starting at REGION. */
3360
3361 static void
3362 expand_omp_taskreg (struct omp_region *region)
3363 {
3364 basic_block entry_bb, exit_bb, new_bb;
3365 struct function *child_cfun;
3366 tree child_fn, block, t;
3367 tree save_current;
3368 gimple_stmt_iterator gsi;
3369 gimple entry_stmt, stmt;
3370 edge e;
3371 VEC(tree,gc) *ws_args;
3372
3373 entry_stmt = last_stmt (region->entry);
3374 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3375 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3376 /* If this function has already been instrumented, make sure
3377 the child function isn't instrumented again. */
3378 child_cfun->after_tree_profile = cfun->after_tree_profile;
3379
3380 entry_bb = region->entry;
3381 exit_bb = region->exit;
3382
3383 if (is_combined_parallel (region))
3384 ws_args = region->ws_args;
3385 else
3386 ws_args = NULL;
3387
3388 if (child_cfun->cfg)
3389 {
3390 /* Due to inlining, it may happen that we have already outlined
3391 the region, in which case all we need to do is make the
3392 sub-graph unreachable and emit the parallel call. */
3393 edge entry_succ_e, exit_succ_e;
3394 gimple_stmt_iterator gsi;
3395
3396 entry_succ_e = single_succ_edge (entry_bb);
3397
3398 gsi = gsi_last_bb (entry_bb);
3399 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3400 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3401 gsi_remove (&gsi, true);
3402
3403 new_bb = entry_bb;
3404 if (exit_bb)
3405 {
3406 exit_succ_e = single_succ_edge (exit_bb);
3407 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3408 }
3409 remove_edge_and_dominated_blocks (entry_succ_e);
3410 }
3411 else
3412 {
3413 unsigned srcidx, dstidx, num;
3414
3415 /* If the parallel region needs data sent from the parent
3416 function, then the very first statement (except possible
3417 tree profile counter updates) of the parallel body
3418 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3419 &.OMP_DATA_O is passed as an argument to the child function,
3420 we need to replace it with the argument as seen by the child
3421 function.
3422
3423 In most cases, this will end up being the identity assignment
3424 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3425 a function call that has been inlined, the original PARM_DECL
3426 .OMP_DATA_I may have been converted into a different local
3427 variable, in which case we need to keep the assignment. */
3428 if (gimple_omp_taskreg_data_arg (entry_stmt))
3429 {
3430 basic_block entry_succ_bb = single_succ (entry_bb);
3431 gimple_stmt_iterator gsi;
3432 tree arg, narg;
3433 gimple parcopy_stmt = NULL;
3434
3435 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3436 {
3437 gimple stmt;
3438
3439 gcc_assert (!gsi_end_p (gsi));
3440 stmt = gsi_stmt (gsi);
3441 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3442 continue;
3443
3444 if (gimple_num_ops (stmt) == 2)
3445 {
3446 tree arg = gimple_assign_rhs1 (stmt);
3447
3448 /* We're ignoring the subcode because we're
3449 effectively doing a STRIP_NOPS. */
3450
3451 if (TREE_CODE (arg) == ADDR_EXPR
3452 && TREE_OPERAND (arg, 0)
3453 == gimple_omp_taskreg_data_arg (entry_stmt))
3454 {
3455 parcopy_stmt = stmt;
3456 break;
3457 }
3458 }
3459 }
3460
3461 gcc_assert (parcopy_stmt != NULL);
3462 arg = DECL_ARGUMENTS (child_fn);
3463
3464 if (!gimple_in_ssa_p (cfun))
3465 {
3466 if (gimple_assign_lhs (parcopy_stmt) == arg)
3467 gsi_remove (&gsi, true);
3468 else
3469 {
3470 /* ?? Is setting the subcode really necessary ?? */
3471 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3472 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3473 }
3474 }
3475 else
3476 {
3477 /* If we are in ssa form, we must load the value from the default
3478 definition of the argument. That should not be defined now,
3479 since the argument is not used uninitialized. */
3480 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3481 narg = make_ssa_name (arg, gimple_build_nop ());
3482 set_default_def (arg, narg);
3483 /* ?? Is setting the subcode really necessary ?? */
3484 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3485 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3486 update_stmt (parcopy_stmt);
3487 }
3488 }
3489
3490 /* Declare local variables needed in CHILD_CFUN. */
3491 block = DECL_INITIAL (child_fn);
3492 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3493 /* The gimplifier could record temporaries in parallel/task block
3494 rather than in containing function's local_decls chain,
3495 which would mean cgraph missed finalizing them. Do it now. */
3496 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3497 if (TREE_CODE (t) == VAR_DECL
3498 && TREE_STATIC (t)
3499 && !DECL_EXTERNAL (t))
3500 varpool_finalize_decl (t);
3501 DECL_SAVED_TREE (child_fn) = NULL;
3502 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3503 TREE_USED (block) = 1;
3504
3505 /* Reset DECL_CONTEXT on function arguments. */
3506 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3507 DECL_CONTEXT (t) = child_fn;
3508
3509 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3510 so that it can be moved to the child function. */
3511 gsi = gsi_last_bb (entry_bb);
3512 stmt = gsi_stmt (gsi);
3513 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3514 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3515 gsi_remove (&gsi, true);
3516 e = split_block (entry_bb, stmt);
3517 entry_bb = e->dest;
3518 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3519
3520 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3521 if (exit_bb)
3522 {
3523 gsi = gsi_last_bb (exit_bb);
3524 gcc_assert (!gsi_end_p (gsi)
3525 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3526 stmt = gimple_build_return (NULL);
3527 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3528 gsi_remove (&gsi, true);
3529 }
3530
3531 /* Move the parallel region into CHILD_CFUN. */
3532
3533 if (gimple_in_ssa_p (cfun))
3534 {
3535 push_cfun (child_cfun);
3536 init_tree_ssa (child_cfun);
3537 init_ssa_operands ();
3538 cfun->gimple_df->in_ssa_p = true;
3539 pop_cfun ();
3540 block = NULL_TREE;
3541 }
3542 else
3543 block = gimple_block (entry_stmt);
3544
3545 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3546 if (exit_bb)
3547 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3548
3549 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3550 num = VEC_length (tree, child_cfun->local_decls);
3551 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3552 {
3553 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3554 if (DECL_CONTEXT (t) == cfun->decl)
3555 continue;
3556 if (srcidx != dstidx)
3557 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3558 dstidx++;
3559 }
3560 if (dstidx != num)
3561 VEC_truncate (tree, child_cfun->local_decls, dstidx);
3562
3563 /* Inform the callgraph about the new function. */
3564 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3565 = cfun->curr_properties;
3566 cgraph_add_new_function (child_fn, true);
3567
3568 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3569 fixed in a following pass. */
3570 push_cfun (child_cfun);
3571 save_current = current_function_decl;
3572 current_function_decl = child_fn;
3573 if (optimize)
3574 optimize_omp_library_calls (entry_stmt);
3575 rebuild_cgraph_edges ();
3576
3577 /* Some EH regions might become dead, see PR34608. If
3578 pass_cleanup_cfg isn't the first pass to happen with the
3579 new child, these dead EH edges might cause problems.
3580 Clean them up now. */
3581 if (flag_exceptions)
3582 {
3583 basic_block bb;
3584 bool changed = false;
3585
3586 FOR_EACH_BB (bb)
3587 changed |= gimple_purge_dead_eh_edges (bb);
3588 if (changed)
3589 cleanup_tree_cfg ();
3590 }
3591 if (gimple_in_ssa_p (cfun))
3592 update_ssa (TODO_update_ssa);
3593 current_function_decl = save_current;
3594 pop_cfun ();
3595 }
3596
3597 /* Emit a library call to launch the children threads. */
3598 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3599 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3600 else
3601 expand_task_call (new_bb, entry_stmt);
3602 update_ssa (TODO_update_ssa_only_virtuals);
3603 }
3604
3605
3606 /* A subroutine of expand_omp_for. Generate code for a parallel
3607 loop with any schedule. Given parameters:
3608
3609 for (V = N1; V cond N2; V += STEP) BODY;
3610
3611 where COND is "<" or ">", we generate pseudocode
3612
3613 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3614 if (more) goto L0; else goto L3;
3615 L0:
3616 V = istart0;
3617 iend = iend0;
3618 L1:
3619 BODY;
3620 V += STEP;
3621 if (V cond iend) goto L1; else goto L2;
3622 L2:
3623 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3624 L3:
3625
3626 If this is a combined omp parallel loop, instead of the call to
3627 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3628
3629 For collapsed loops, given parameters:
3630 collapse(3)
3631 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3632 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3633 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3634 BODY;
3635
3636 we generate pseudocode
3637
3638 if (cond3 is <)
3639 adj = STEP3 - 1;
3640 else
3641 adj = STEP3 + 1;
3642 count3 = (adj + N32 - N31) / STEP3;
3643 if (cond2 is <)
3644 adj = STEP2 - 1;
3645 else
3646 adj = STEP2 + 1;
3647 count2 = (adj + N22 - N21) / STEP2;
3648 if (cond1 is <)
3649 adj = STEP1 - 1;
3650 else
3651 adj = STEP1 + 1;
3652 count1 = (adj + N12 - N11) / STEP1;
3653 count = count1 * count2 * count3;
3654 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3655 if (more) goto L0; else goto L3;
3656 L0:
3657 V = istart0;
3658 T = V;
3659 V3 = N31 + (T % count3) * STEP3;
3660 T = T / count3;
3661 V2 = N21 + (T % count2) * STEP2;
3662 T = T / count2;
3663 V1 = N11 + T * STEP1;
3664 iend = iend0;
3665 L1:
3666 BODY;
3667 V += 1;
3668 if (V < iend) goto L10; else goto L2;
3669 L10:
3670 V3 += STEP3;
3671 if (V3 cond3 N32) goto L1; else goto L11;
3672 L11:
3673 V3 = N31;
3674 V2 += STEP2;
3675 if (V2 cond2 N22) goto L1; else goto L12;
3676 L12:
3677 V2 = N21;
3678 V1 += STEP1;
3679 goto L1;
3680 L2:
3681 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3682 L3:
3683
3684 */
3685
3686 static void
3687 expand_omp_for_generic (struct omp_region *region,
3688 struct omp_for_data *fd,
3689 enum built_in_function start_fn,
3690 enum built_in_function next_fn)
3691 {
3692 tree type, istart0, iend0, iend;
3693 tree t, vmain, vback, bias = NULL_TREE;
3694 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3695 basic_block l2_bb = NULL, l3_bb = NULL;
3696 gimple_stmt_iterator gsi;
3697 gimple stmt;
3698 bool in_combined_parallel = is_combined_parallel (region);
3699 bool broken_loop = region->cont == NULL;
3700 edge e, ne;
3701 tree *counts = NULL;
3702 int i;
3703
3704 gcc_assert (!broken_loop || !in_combined_parallel);
3705 gcc_assert (fd->iter_type == long_integer_type_node
3706 || !in_combined_parallel);
3707
3708 type = TREE_TYPE (fd->loop.v);
3709 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3710 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3711 TREE_ADDRESSABLE (istart0) = 1;
3712 TREE_ADDRESSABLE (iend0) = 1;
3713 if (gimple_in_ssa_p (cfun))
3714 {
3715 add_referenced_var (istart0);
3716 add_referenced_var (iend0);
3717 }
3718
3719 /* See if we need to bias by LLONG_MIN. */
3720 if (fd->iter_type == long_long_unsigned_type_node
3721 && TREE_CODE (type) == INTEGER_TYPE
3722 && !TYPE_UNSIGNED (type))
3723 {
3724 tree n1, n2;
3725
3726 if (fd->loop.cond_code == LT_EXPR)
3727 {
3728 n1 = fd->loop.n1;
3729 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3730 }
3731 else
3732 {
3733 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3734 n2 = fd->loop.n1;
3735 }
3736 if (TREE_CODE (n1) != INTEGER_CST
3737 || TREE_CODE (n2) != INTEGER_CST
3738 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3739 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3740 }
3741
3742 entry_bb = region->entry;
3743 cont_bb = region->cont;
3744 collapse_bb = NULL;
3745 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3746 gcc_assert (broken_loop
3747 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3748 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3749 l1_bb = single_succ (l0_bb);
3750 if (!broken_loop)
3751 {
3752 l2_bb = create_empty_bb (cont_bb);
3753 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3754 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3755 }
3756 else
3757 l2_bb = NULL;
3758 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3759 exit_bb = region->exit;
3760
3761 gsi = gsi_last_bb (entry_bb);
3762
3763 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3764 if (fd->collapse > 1)
3765 {
3766 /* Collapsed loops need work for expansion in SSA form. */
3767 gcc_assert (!gimple_in_ssa_p (cfun));
3768 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3769 for (i = 0; i < fd->collapse; i++)
3770 {
3771 tree itype = TREE_TYPE (fd->loops[i].v);
3772
3773 if (POINTER_TYPE_P (itype))
3774 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3775 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3776 ? -1 : 1));
3777 t = fold_build2 (PLUS_EXPR, itype,
3778 fold_convert (itype, fd->loops[i].step), t);
3779 t = fold_build2 (PLUS_EXPR, itype, t,
3780 fold_convert (itype, fd->loops[i].n2));
3781 t = fold_build2 (MINUS_EXPR, itype, t,
3782 fold_convert (itype, fd->loops[i].n1));
3783 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3784 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3785 fold_build1 (NEGATE_EXPR, itype, t),
3786 fold_build1 (NEGATE_EXPR, itype,
3787 fold_convert (itype,
3788 fd->loops[i].step)));
3789 else
3790 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3791 fold_convert (itype, fd->loops[i].step));
3792 t = fold_convert (type, t);
3793 if (TREE_CODE (t) == INTEGER_CST)
3794 counts[i] = t;
3795 else
3796 {
3797 counts[i] = create_tmp_var (type, ".count");
3798 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3799 true, GSI_SAME_STMT);
3800 stmt = gimple_build_assign (counts[i], t);
3801 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3802 }
3803 if (SSA_VAR_P (fd->loop.n2))
3804 {
3805 if (i == 0)
3806 t = counts[0];
3807 else
3808 {
3809 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3810 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3811 true, GSI_SAME_STMT);
3812 }
3813 stmt = gimple_build_assign (fd->loop.n2, t);
3814 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3815 }
3816 }
3817 }
3818 if (in_combined_parallel)
3819 {
3820 /* In a combined parallel loop, emit a call to
3821 GOMP_loop_foo_next. */
3822 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
3823 build_fold_addr_expr (istart0),
3824 build_fold_addr_expr (iend0));
3825 }
3826 else
3827 {
3828 tree t0, t1, t2, t3, t4;
3829 /* If this is not a combined parallel loop, emit a call to
3830 GOMP_loop_foo_start in ENTRY_BB. */
3831 t4 = build_fold_addr_expr (iend0);
3832 t3 = build_fold_addr_expr (istart0);
3833 t2 = fold_convert (fd->iter_type, fd->loop.step);
3834 if (POINTER_TYPE_P (type)
3835 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3836 {
3837 /* Avoid casting pointers to integer of a different size. */
3838 tree itype
3839 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3840 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3841 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3842 }
3843 else
3844 {
3845 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3846 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3847 }
3848 if (bias)
3849 {
3850 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3851 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3852 }
3853 if (fd->iter_type == long_integer_type_node)
3854 {
3855 if (fd->chunk_size)
3856 {
3857 t = fold_convert (fd->iter_type, fd->chunk_size);
3858 t = build_call_expr (builtin_decl_explicit (start_fn),
3859 6, t0, t1, t2, t, t3, t4);
3860 }
3861 else
3862 t = build_call_expr (builtin_decl_explicit (start_fn),
3863 5, t0, t1, t2, t3, t4);
3864 }
3865 else
3866 {
3867 tree t5;
3868 tree c_bool_type;
3869 tree bfn_decl;
3870
3871 /* The GOMP_loop_ull_*start functions have an additional boolean
3872 argument, true for < loops and false for > loops.
3873 In Fortran, the C bool type can be different from
3874 boolean_type_node. */
3875 bfn_decl = builtin_decl_explicit (start_fn);
3876 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
3877 t5 = build_int_cst (c_bool_type,
3878 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3879 if (fd->chunk_size)
3880 {
3882 t = fold_convert (fd->iter_type, fd->chunk_size);
3883 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
3884 }
3885 else
3886 t = build_call_expr (builtin_decl_explicit (start_fn),
3887 6, t5, t0, t1, t2, t3, t4);
3888 }
3889 }
3890 if (TREE_TYPE (t) != boolean_type_node)
3891 t = fold_build2 (NE_EXPR, boolean_type_node,
3892 t, build_int_cst (TREE_TYPE (t), 0));
3893 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3894 true, GSI_SAME_STMT);
3895 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3896
3897 /* Remove the GIMPLE_OMP_FOR statement. */
3898 gsi_remove (&gsi, true);
3899
3900 /* Iteration setup for sequential loop goes in L0_BB. */
3901 gsi = gsi_start_bb (l0_bb);
3902 t = istart0;
3903 if (bias)
3904 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3905 if (POINTER_TYPE_P (type))
3906 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3907 0), t);
3908 t = fold_convert (type, t);
3909 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3910 false, GSI_CONTINUE_LINKING);
3911 stmt = gimple_build_assign (fd->loop.v, t);
3912 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3913
3914 t = iend0;
3915 if (bias)
3916 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3917 if (POINTER_TYPE_P (type))
3918 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3919 0), t);
3920 t = fold_convert (type, t);
3921 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3922 false, GSI_CONTINUE_LINKING);
3923 if (fd->collapse > 1)
3924 {
3925 tree tem = create_tmp_var (type, ".tem");
3926
3927 stmt = gimple_build_assign (tem, fd->loop.v);
3928 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3929 for (i = fd->collapse - 1; i >= 0; i--)
3930 {
3931 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3932 itype = vtype;
3933 if (POINTER_TYPE_P (vtype))
3934 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3935 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3936 t = fold_convert (itype, t);
3937 t = fold_build2 (MULT_EXPR, itype, t,
3938 fold_convert (itype, fd->loops[i].step));
3939 if (POINTER_TYPE_P (vtype))
3940 t = fold_build_pointer_plus (fd->loops[i].n1, t);
3941 else
3942 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3943 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3944 false, GSI_CONTINUE_LINKING);
3945 stmt = gimple_build_assign (fd->loops[i].v, t);
3946 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3947 if (i != 0)
3948 {
3949 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3950 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3951 false, GSI_CONTINUE_LINKING);
3952 stmt = gimple_build_assign (tem, t);
3953 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3954 }
3955 }
3956 }
3957
3958 if (!broken_loop)
3959 {
3960 /* Code to control the increment and predicate for the sequential
3961 loop goes in the CONT_BB. */
3962 gsi = gsi_last_bb (cont_bb);
3963 stmt = gsi_stmt (gsi);
3964 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3965 vmain = gimple_omp_continue_control_use (stmt);
3966 vback = gimple_omp_continue_control_def (stmt);
3967
3968 if (POINTER_TYPE_P (type))
3969 t = fold_build_pointer_plus (vmain, fd->loop.step);
3970 else
3971 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3972 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3973 true, GSI_SAME_STMT);
3974 stmt = gimple_build_assign (vback, t);
3975 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3976
3977 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3978 stmt = gimple_build_cond_empty (t);
3979 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3980
3981 /* Remove GIMPLE_OMP_CONTINUE. */
3982 gsi_remove (&gsi, true);
3983
3984 if (fd->collapse > 1)
3985 {
3986 basic_block last_bb, bb;
3987
3988 last_bb = cont_bb;
3989 for (i = fd->collapse - 1; i >= 0; i--)
3990 {
3991 tree vtype = TREE_TYPE (fd->loops[i].v);
3992
3993 bb = create_empty_bb (last_bb);
3994 gsi = gsi_start_bb (bb);
3995
3996 if (i < fd->collapse - 1)
3997 {
3998 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3999 e->probability = REG_BR_PROB_BASE / 8;
4000
4001 t = fd->loops[i + 1].n1;
4002 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4003 false, GSI_CONTINUE_LINKING);
4004 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4005 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4006 }
4007 else
4008 collapse_bb = bb;
4009
4010 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4011
4012 if (POINTER_TYPE_P (vtype))
4013 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
4014 else
4015 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
4016 fd->loops[i].step);
4017 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4018 false, GSI_CONTINUE_LINKING);
4019 stmt = gimple_build_assign (fd->loops[i].v, t);
4020 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4021
4022 if (i > 0)
4023 {
4024 t = fd->loops[i].n2;
4025 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4026 false, GSI_CONTINUE_LINKING);
4027 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4028 fd->loops[i].v, t);
4029 stmt = gimple_build_cond_empty (t);
4030 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4031 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4032 e->probability = REG_BR_PROB_BASE * 7 / 8;
4033 }
4034 else
4035 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4036 last_bb = bb;
4037 }
4038 }
4039
4040 /* Emit code to get the next parallel iteration in L2_BB. */
4041 gsi = gsi_start_bb (l2_bb);
4042
4043 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4044 build_fold_addr_expr (istart0),
4045 build_fold_addr_expr (iend0));
4046 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4047 false, GSI_CONTINUE_LINKING);
4048 if (TREE_TYPE (t) != boolean_type_node)
4049 t = fold_build2 (NE_EXPR, boolean_type_node,
4050 t, build_int_cst (TREE_TYPE (t), 0));
4051 stmt = gimple_build_cond_empty (t);
4052 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4053 }
4054
4055 /* Add the loop cleanup function. */
4056 gsi = gsi_last_bb (exit_bb);
4057 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4058 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
4059 else
4060 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
4061 stmt = gimple_build_call (t, 0);
4062 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4063 gsi_remove (&gsi, true);
4064
4065 /* Connect the new blocks. */
4066 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4067 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4068
4069 if (!broken_loop)
4070 {
4071 gimple_seq phis;
4072
4073 e = find_edge (cont_bb, l3_bb);
4074 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4075
4076 phis = phi_nodes (l3_bb);
4077 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4078 {
4079 gimple phi = gsi_stmt (gsi);
4080 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4081 PHI_ARG_DEF_FROM_EDGE (phi, e));
4082 }
4083 remove_edge (e);
4084
4085 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4086 if (fd->collapse > 1)
4087 {
4088 e = find_edge (cont_bb, l1_bb);
4089 remove_edge (e);
4090 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4091 }
4092 else
4093 {
4094 e = find_edge (cont_bb, l1_bb);
4095 e->flags = EDGE_TRUE_VALUE;
4096 }
4097 e->probability = REG_BR_PROB_BASE * 7 / 8;
4098 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4099 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4100
4101 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4102 recompute_dominator (CDI_DOMINATORS, l2_bb));
4103 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4104 recompute_dominator (CDI_DOMINATORS, l3_bb));
4105 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4106 recompute_dominator (CDI_DOMINATORS, l0_bb));
4107 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4108 recompute_dominator (CDI_DOMINATORS, l1_bb));
4109 }
4110 }
4111
4112
4113 /* A subroutine of expand_omp_for. Generate code for a parallel
4114 loop with static schedule and no specified chunk size. Given
4115 parameters:
4116
4117 for (V = N1; V cond N2; V += STEP) BODY;
4118
4119 where COND is "<" or ">", we generate pseudocode
4120
4121 if (cond is <)
4122 adj = STEP - 1;
4123 else
4124 adj = STEP + 1;
4125 if ((__typeof (V)) -1 > 0 && cond is >)
4126 n = -(adj + N2 - N1) / -STEP;
4127 else
4128 n = (adj + N2 - N1) / STEP;
4129 q = n / nthreads;
4130 tt = n % nthreads;
4131 if (threadid < tt) goto L3; else goto L4;
4132 L3:
4133 tt = 0;
4134 q = q + 1;
4135 L4:
4136 s0 = q * threadid + tt;
4137 e0 = s0 + q;
4138 V = s0 * STEP + N1;
4139 if (s0 >= e0) goto L2; else goto L0;
4140 L0:
4141 e = e0 * STEP + N1;
4142 L1:
4143 BODY;
4144 V += STEP;
4145 if (V cond e) goto L1;
4146 L2:
4147 */
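
/* An illustrative sketch of the Q/TT split above (values and the helper are
   hypothetical, not emitted by this pass): with n = 10 iterations and
   nthreads = 4, the first TT threads get one extra iteration each.

     static void
     split (long n, long nthreads, long threadid, long *s0, long *e0)
     {
       long q = n / nthreads, tt = n % nthreads;
       if (threadid < tt)
         {
           tt = 0;
           q++;
         }
       *s0 = q * threadid + tt;
       *e0 = *s0 + q;
     }

   gives s0/e0 = 0/3, 3/6, 6/8 and 8/10 for threads 0..3, covering all ten
   iterations without overlap.  */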
4148
4149 static void
4150 expand_omp_for_static_nochunk (struct omp_region *region,
4151 struct omp_for_data *fd)
4152 {
4153 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
4154 tree type, itype, vmain, vback;
4155 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4156 basic_block body_bb, cont_bb;
4157 basic_block fin_bb;
4158 gimple_stmt_iterator gsi;
4159 gimple stmt;
4160 edge ep;
4161
4162 itype = type = TREE_TYPE (fd->loop.v);
4163 if (POINTER_TYPE_P (type))
4164 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4165
4166 entry_bb = region->entry;
4167 cont_bb = region->cont;
4168 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4169 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4170 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4171 body_bb = single_succ (seq_start_bb);
4172 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4173 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4174 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4175 exit_bb = region->exit;
4176
4177 /* Iteration space partitioning goes in ENTRY_BB. */
4178 gsi = gsi_last_bb (entry_bb);
4179 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4180
4181 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4182 t = fold_convert (itype, t);
4183 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4184 true, GSI_SAME_STMT);
4185
4186 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4187 t = fold_convert (itype, t);
4188 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4189 true, GSI_SAME_STMT);
4190
4191 fd->loop.n1
4192 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4193 true, NULL_TREE, true, GSI_SAME_STMT);
4194 fd->loop.n2
4195 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4196 true, NULL_TREE, true, GSI_SAME_STMT);
4197 fd->loop.step
4198 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4199 true, NULL_TREE, true, GSI_SAME_STMT);
4200
4201 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4202 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4203 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4204 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4205 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4206 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4207 fold_build1 (NEGATE_EXPR, itype, t),
4208 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4209 else
4210 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4211 t = fold_convert (itype, t);
4212 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4213
4214 q = create_tmp_var (itype, "q");
4215 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4216 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4217 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4218
4219 tt = create_tmp_var (itype, "tt");
4220 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4221 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4222 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
4223
4224 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4225 stmt = gimple_build_cond_empty (t);
4226 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4227
4228 second_bb = split_block (entry_bb, stmt)->dest;
4229 gsi = gsi_last_bb (second_bb);
4230 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4231
4232 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4233 GSI_SAME_STMT);
4234 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4235 build_int_cst (itype, 1));
4236 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4237
4238 third_bb = split_block (second_bb, stmt)->dest;
4239 gsi = gsi_last_bb (third_bb);
4240 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4241
4242 t = build2 (MULT_EXPR, itype, q, threadid);
4243 t = build2 (PLUS_EXPR, itype, t, tt);
4244 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4245
4246 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4247 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4248
4249 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4250 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4251
4252 /* Remove the GIMPLE_OMP_FOR statement. */
4253 gsi_remove (&gsi, true);
4254
4255 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4256 gsi = gsi_start_bb (seq_start_bb);
4257
4258 t = fold_convert (itype, s0);
4259 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4260 if (POINTER_TYPE_P (type))
4261 t = fold_build_pointer_plus (fd->loop.n1, t);
4262 else
4263 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4264 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4265 false, GSI_CONTINUE_LINKING);
4266 stmt = gimple_build_assign (fd->loop.v, t);
4267 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4268
4269 t = fold_convert (itype, e0);
4270 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4271 if (POINTER_TYPE_P (type))
4272 t = fold_build_pointer_plus (fd->loop.n1, t);
4273 else
4274 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4275 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4276 false, GSI_CONTINUE_LINKING);
4277
4278 /* The code controlling the sequential loop replaces the
4279 GIMPLE_OMP_CONTINUE. */
4280 gsi = gsi_last_bb (cont_bb);
4281 stmt = gsi_stmt (gsi);
4282 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4283 vmain = gimple_omp_continue_control_use (stmt);
4284 vback = gimple_omp_continue_control_def (stmt);
4285
4286 if (POINTER_TYPE_P (type))
4287 t = fold_build_pointer_plus (vmain, fd->loop.step);
4288 else
4289 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4290 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4291 true, GSI_SAME_STMT);
4292 stmt = gimple_build_assign (vback, t);
4293 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4294
4295 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4296 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4297
4298 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4299 gsi_remove (&gsi, true);
4300
4301 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4302 gsi = gsi_last_bb (exit_bb);
4303 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4304 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4305 false, GSI_SAME_STMT);
4306 gsi_remove (&gsi, true);
4307
4308 /* Connect all the blocks. */
4309 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
4310 ep->probability = REG_BR_PROB_BASE / 4 * 3;
4311 ep = find_edge (entry_bb, second_bb);
4312 ep->flags = EDGE_TRUE_VALUE;
4313 ep->probability = REG_BR_PROB_BASE / 4;
4314 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4315 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4316
4317 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4318 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4319
4320 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
4321 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
4322 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
4323 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4324 recompute_dominator (CDI_DOMINATORS, body_bb));
4325 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4326 recompute_dominator (CDI_DOMINATORS, fin_bb));
4327 }
4328
4329
4330 /* A subroutine of expand_omp_for. Generate code for a parallel
4331 loop with static schedule and a specified chunk size. Given
4332 parameters:
4333
4334 for (V = N1; V cond N2; V += STEP) BODY;
4335
4336 where COND is "<" or ">", we generate pseudocode
4337
4338 if (cond is <)
4339 adj = STEP - 1;
4340 else
4341 adj = STEP + 1;
4342 if ((__typeof (V)) -1 > 0 && cond is >)
4343 n = -(adj + N2 - N1) / -STEP;
4344 else
4345 n = (adj + N2 - N1) / STEP;
4346 trip = 0;
4347 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4348 here so that V is defined
4349 if the loop is not entered
4350 L0:
4351 s0 = (trip * nthreads + threadid) * CHUNK;
4352 e0 = min(s0 + CHUNK, n);
4353 if (s0 < n) goto L1; else goto L4;
4354 L1:
4355 V = s0 * STEP + N1;
4356 e = e0 * STEP + N1;
4357 L2:
4358 BODY;
4359 V += STEP;
4360 if (V cond e) goto L2; else goto L3;
4361 L3:
4362 trip += 1;
4363 goto L0;
4364 L4:
4365 */
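
/* An illustrative sketch of the chunked assignment above (hypothetical
   values): with n = 10, nthreads = 2 and CHUNK = 3,

     s0 = (trip * nthreads + threadid) * CHUNK;
     e0 = s0 + CHUNK < n ? s0 + CHUNK : n;

   yields per trip:

     trip 0:  thread 0 -> [0, 3)    thread 1 -> [3, 6)
     trip 1:  thread 0 -> [6, 9)    thread 1 -> [9, 10)
     trip 2:  s0 >= n for both threads, so both exit at L4.  */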
4366
4367 static void
4368 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4369 {
4370 tree n, s0, e0, e, t;
4371 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4372 tree type, itype, v_main, v_back, v_extra;
4373 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4374 basic_block trip_update_bb, cont_bb, fin_bb;
4375 gimple_stmt_iterator si;
4376 gimple stmt;
4377 edge se;
4378
4379 itype = type = TREE_TYPE (fd->loop.v);
4380 if (POINTER_TYPE_P (type))
4381 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4382
4383 entry_bb = region->entry;
4384 se = split_block (entry_bb, last_stmt (entry_bb));
4385 entry_bb = se->src;
4386 iter_part_bb = se->dest;
4387 cont_bb = region->cont;
4388 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4389 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4390 == FALLTHRU_EDGE (cont_bb)->dest);
4391 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4392 body_bb = single_succ (seq_start_bb);
4393 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4394 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4395 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4396 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4397 exit_bb = region->exit;
4398
4399 /* Trip and adjustment setup goes in ENTRY_BB. */
4400 si = gsi_last_bb (entry_bb);
4401 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4402
4403 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4404 t = fold_convert (itype, t);
4405 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4406 true, GSI_SAME_STMT);
4407
4408 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4409 t = fold_convert (itype, t);
4410 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4411 true, GSI_SAME_STMT);
4412
4413 fd->loop.n1
4414 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4415 true, NULL_TREE, true, GSI_SAME_STMT);
4416 fd->loop.n2
4417 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4418 true, NULL_TREE, true, GSI_SAME_STMT);
4419 fd->loop.step
4420 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4421 true, NULL_TREE, true, GSI_SAME_STMT);
4422 fd->chunk_size
4423 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4424 true, NULL_TREE, true, GSI_SAME_STMT);
4425
4426 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4427 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4428 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4429 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4430 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4431 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4432 fold_build1 (NEGATE_EXPR, itype, t),
4433 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4434 else
4435 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4436 t = fold_convert (itype, t);
4437 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4438 true, GSI_SAME_STMT);
4439
4440 trip_var = create_tmp_var (itype, ".trip");
4441 if (gimple_in_ssa_p (cfun))
4442 {
4443 add_referenced_var (trip_var);
4444 trip_init = make_ssa_name (trip_var, NULL);
4445 trip_main = make_ssa_name (trip_var, NULL);
4446 trip_back = make_ssa_name (trip_var, NULL);
4447 }
4448 else
4449 {
4450 trip_init = trip_var;
4451 trip_main = trip_var;
4452 trip_back = trip_var;
4453 }
4454
4455 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4456 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4457
4458 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4459 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4460 if (POINTER_TYPE_P (type))
4461 t = fold_build_pointer_plus (fd->loop.n1, t);
4462 else
4463 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4464 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4465 true, GSI_SAME_STMT);
4466
4467 /* Remove the GIMPLE_OMP_FOR. */
4468 gsi_remove (&si, true);
4469
4470 /* Iteration space partitioning goes in ITER_PART_BB. */
4471 si = gsi_last_bb (iter_part_bb);
4472
4473 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4474 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4475 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4476 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4477 false, GSI_CONTINUE_LINKING);
4478
4479 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4480 t = fold_build2 (MIN_EXPR, itype, t, n);
4481 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4482 false, GSI_CONTINUE_LINKING);
4483
4484 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4485 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4486
4487 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4488 si = gsi_start_bb (seq_start_bb);
4489
4490 t = fold_convert (itype, s0);
4491 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4492 if (POINTER_TYPE_P (type))
4493 t = fold_build_pointer_plus (fd->loop.n1, t);
4494 else
4495 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4496 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4497 false, GSI_CONTINUE_LINKING);
4498 stmt = gimple_build_assign (fd->loop.v, t);
4499 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4500
4501 t = fold_convert (itype, e0);
4502 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4503 if (POINTER_TYPE_P (type))
4504 t = fold_build_pointer_plus (fd->loop.n1, t);
4505 else
4506 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4507 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4508 false, GSI_CONTINUE_LINKING);
4509
4510 /* The code controlling the sequential loop goes in CONT_BB,
4511 replacing the GIMPLE_OMP_CONTINUE. */
4512 si = gsi_last_bb (cont_bb);
4513 stmt = gsi_stmt (si);
4514 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4515 v_main = gimple_omp_continue_control_use (stmt);
4516 v_back = gimple_omp_continue_control_def (stmt);
4517
4518 if (POINTER_TYPE_P (type))
4519 t = fold_build_pointer_plus (v_main, fd->loop.step);
4520 else
4521 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4522 stmt = gimple_build_assign (v_back, t);
4523 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4524
4525 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4526 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4527
4528 /* Remove GIMPLE_OMP_CONTINUE. */
4529 gsi_remove (&si, true);
4530
4531 /* Trip update code goes into TRIP_UPDATE_BB. */
4532 si = gsi_start_bb (trip_update_bb);
4533
4534 t = build_int_cst (itype, 1);
4535 t = build2 (PLUS_EXPR, itype, trip_main, t);
4536 stmt = gimple_build_assign (trip_back, t);
4537 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4538
4539 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4540 si = gsi_last_bb (exit_bb);
4541 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4542 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4543 false, GSI_SAME_STMT);
4544 gsi_remove (&si, true);
4545
4546 /* Connect the new blocks. */
4547 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4548 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4549
4550 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4551 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4552
4553 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4554
4555 if (gimple_in_ssa_p (cfun))
4556 {
4557 gimple_stmt_iterator psi;
4558 gimple phi;
4559 edge re, ene;
4560 edge_var_map_vector head;
4561 edge_var_map *vm;
4562 size_t i;
4563
4564 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4565 remove arguments of the phi nodes in fin_bb. We need to create
4566 appropriate phi nodes in iter_part_bb instead. */
4567 se = single_pred_edge (fin_bb);
4568 re = single_succ_edge (trip_update_bb);
4569 head = redirect_edge_var_map_vector (re);
4570 ene = single_succ_edge (entry_bb);
4571
4572 psi = gsi_start_phis (fin_bb);
4573 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4574 gsi_next (&psi), ++i)
4575 {
4576 gimple nphi;
4577 source_location locus;
4578
4579 phi = gsi_stmt (psi);
4580 t = gimple_phi_result (phi);
4581 gcc_assert (t == redirect_edge_var_map_result (vm));
4582 nphi = create_phi_node (t, iter_part_bb);
4583 SSA_NAME_DEF_STMT (t) = nphi;
4584
4585 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4586 locus = gimple_phi_arg_location_from_edge (phi, se);
4587
4588 /* A special case -- fd->loop.v is not yet computed in
4589 iter_part_bb, we need to use v_extra instead. */
4590 if (t == fd->loop.v)
4591 t = v_extra;
4592 add_phi_arg (nphi, t, ene, locus);
4593 locus = redirect_edge_var_map_location (vm);
4594 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4595 }
4596 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4597 redirect_edge_var_map_clear (re);
4598 while (1)
4599 {
4600 psi = gsi_start_phis (fin_bb);
4601 if (gsi_end_p (psi))
4602 break;
4603 remove_phi_node (&psi, false);
4604 }
4605
4606 /* Make phi node for trip. */
4607 phi = create_phi_node (trip_main, iter_part_bb);
4608 SSA_NAME_DEF_STMT (trip_main) = phi;
4609 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4610 UNKNOWN_LOCATION);
4611 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4612 UNKNOWN_LOCATION);
4613 }
4614
4615 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4616 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4617 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4618 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4619 recompute_dominator (CDI_DOMINATORS, fin_bb));
4620 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4621 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4622 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4623 recompute_dominator (CDI_DOMINATORS, body_bb));
4624 }
4625
4626
4627 /* Expand the OpenMP loop defined by REGION. */
4628
4629 static void
4630 expand_omp_for (struct omp_region *region)
4631 {
4632 struct omp_for_data fd;
4633 struct omp_for_data_loop *loops;
4634
4635 loops
4636 = (struct omp_for_data_loop *)
4637 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4638 * sizeof (struct omp_for_data_loop));
4639 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4640 region->sched_kind = fd.sched_kind;
4641
4642 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4643 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4644 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4645 if (region->cont)
4646 {
4647 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4648 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4649 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4650 }
4651
4652 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4653 && !fd.have_ordered
4654 && fd.collapse == 1
4655 && region->cont != NULL)
4656 {
4657 if (fd.chunk_size == NULL)
4658 expand_omp_for_static_nochunk (region, &fd);
4659 else
4660 expand_omp_for_static_chunk (region, &fd);
4661 }
4662 else
4663 {
4664 int fn_index, start_ix, next_ix;
4665
4666 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4667 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4668 ? 3 : fd.sched_kind;
4669 fn_index += fd.have_ordered * 4;
4670 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
4671 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
4672 if (fd.iter_type == long_long_unsigned_type_node)
4673 {
4674 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4675 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
4676 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4677 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
4678 }
4679 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4680 (enum built_in_function) next_ix);
4681 }
4682
4683 update_ssa (TODO_update_ssa_only_virtuals);
4684 }
4685
4686
4687 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4688
4689 v = GOMP_sections_start (n);
4690 L0:
4691 switch (v)
4692 {
4693 case 0:
4694 goto L2;
4695 case 1:
4696 section 1;
4697 goto L1;
4698 case 2:
4699 ...
4700 case n:
4701 ...
4702 default:
4703 abort ();
4704 }
4705 L1:
4706 v = GOMP_sections_next ();
4707 goto L0;
4708 L2:
4709 reduction;
4710
4711 If this is a combined parallel sections region, replace the call to
4712 GOMP_sections_start with a call to GOMP_sections_next. */
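
/* An illustrative example (hypothetical user code, not taken from a
   testcase):

     #pragma omp sections
     {
       #pragma omp section
         foo ();
       #pragma omp section
         bar ();
     }

   expands roughly as

     v = GOMP_sections_start (2);
     L0:
     switch (v)
       {
       case 0: goto L2;
       case 1: foo (); goto L1;
       case 2: bar (); goto L1;
       default: __builtin_trap ();
       }
     L1:
     v = GOMP_sections_next ();
     goto L0;
     L2:
       ;  */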
4713
4714 static void
4715 expand_omp_sections (struct omp_region *region)
4716 {
4717 tree t, u, vin = NULL, vmain, vnext, l2;
4718 VEC (tree,heap) *label_vec;
4719 unsigned len;
4720 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4721 gimple_stmt_iterator si, switch_si;
4722 gimple sections_stmt, stmt, cont;
4723 edge_iterator ei;
4724 edge e;
4725 struct omp_region *inner;
4726 unsigned i, casei;
4727 bool exit_reachable = region->cont != NULL;
4728
4729 gcc_assert (exit_reachable == (region->exit != NULL));
4730 entry_bb = region->entry;
4731 l0_bb = single_succ (entry_bb);
4732 l1_bb = region->cont;
4733 l2_bb = region->exit;
4734 if (exit_reachable)
4735 {
4736 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4737 l2 = gimple_block_label (l2_bb);
4738 else
4739 {
4740 /* This can happen if there are reductions. */
4741 len = EDGE_COUNT (l0_bb->succs);
4742 gcc_assert (len > 0);
4743 e = EDGE_SUCC (l0_bb, len - 1);
4744 si = gsi_last_bb (e->dest);
4745 l2 = NULL_TREE;
4746 if (gsi_end_p (si)
4747 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4748 l2 = gimple_block_label (e->dest);
4749 else
4750 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4751 {
4752 si = gsi_last_bb (e->dest);
4753 if (gsi_end_p (si)
4754 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4755 {
4756 l2 = gimple_block_label (e->dest);
4757 break;
4758 }
4759 }
4760 }
4761 default_bb = create_empty_bb (l1_bb->prev_bb);
4762 }
4763 else
4764 {
4765 default_bb = create_empty_bb (l0_bb);
4766 l2 = gimple_block_label (default_bb);
4767 }
4768
4769 /* We will build a switch() with enough cases for all the
4770 GIMPLE_OMP_SECTION regions, a '0' case to signal that no more work remains,
4771 and a default case to abort if something goes wrong. */
4772 len = EDGE_COUNT (l0_bb->succs);
4773
4774 /* Use VEC_quick_push on label_vec throughout, since we know the size
4775 in advance. */
4776 label_vec = VEC_alloc (tree, heap, len);
4777
4778 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4779 GIMPLE_OMP_SECTIONS statement. */
4780 si = gsi_last_bb (entry_bb);
4781 sections_stmt = gsi_stmt (si);
4782 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4783 vin = gimple_omp_sections_control (sections_stmt);
4784 if (!is_combined_parallel (region))
4785 {
4786 /* If we are not inside a combined parallel+sections region,
4787 call GOMP_sections_start. */
4788 t = build_int_cst (unsigned_type_node,
4789 exit_reachable ? len - 1 : len);
4790 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
4791 stmt = gimple_build_call (u, 1, t);
4792 }
4793 else
4794 {
4795 /* Otherwise, call GOMP_sections_next. */
4796 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4797 stmt = gimple_build_call (u, 0);
4798 }
4799 gimple_call_set_lhs (stmt, vin);
4800 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4801 gsi_remove (&si, true);
4802
4803 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4804 L0_BB. */
4805 switch_si = gsi_last_bb (l0_bb);
4806 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4807 if (exit_reachable)
4808 {
4809 cont = last_stmt (l1_bb);
4810 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4811 vmain = gimple_omp_continue_control_use (cont);
4812 vnext = gimple_omp_continue_control_def (cont);
4813 }
4814 else
4815 {
4816 vmain = vin;
4817 vnext = NULL_TREE;
4818 }
4819
4820 i = 0;
4821 if (exit_reachable)
4822 {
4823 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
4824 VEC_quick_push (tree, label_vec, t);
4825 i++;
4826 }
4827
4828 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4829 for (inner = region->inner, casei = 1;
4830 inner;
4831 inner = inner->next, i++, casei++)
4832 {
4833 basic_block s_entry_bb, s_exit_bb;
4834
4835 /* Skip optional reduction region. */
4836 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4837 {
4838 --i;
4839 --casei;
4840 continue;
4841 }
4842
4843 s_entry_bb = inner->entry;
4844 s_exit_bb = inner->exit;
4845
4846 t = gimple_block_label (s_entry_bb);
4847 u = build_int_cst (unsigned_type_node, casei);
4848 u = build_case_label (u, NULL, t);
4849 VEC_quick_push (tree, label_vec, u);
4850
4851 si = gsi_last_bb (s_entry_bb);
4852 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4853 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4854 gsi_remove (&si, true);
4855 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4856
4857 if (s_exit_bb == NULL)
4858 continue;
4859
4860 si = gsi_last_bb (s_exit_bb);
4861 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4862 gsi_remove (&si, true);
4863
4864 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4865 }
4866
4867 /* Error handling code goes in DEFAULT_BB. */
4868 t = gimple_block_label (default_bb);
4869 u = build_case_label (NULL, NULL, t);
4870 make_edge (l0_bb, default_bb, 0);
4871
4872 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4873 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4874 gsi_remove (&switch_si, true);
4875 VEC_free (tree, heap, label_vec);
4876
4877 si = gsi_start_bb (default_bb);
4878 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
4879 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4880
4881 if (exit_reachable)
4882 {
4883 tree bfn_decl;
4884
4885 /* Code to get the next section goes in L1_BB. */
4886 si = gsi_last_bb (l1_bb);
4887 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4888
4889 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4890 stmt = gimple_build_call (bfn_decl, 0);
4891 gimple_call_set_lhs (stmt, vnext);
4892 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4893 gsi_remove (&si, true);
4894
4895 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4896
4897 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4898 si = gsi_last_bb (l2_bb);
4899 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4900 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
4901 else
4902 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
4903 stmt = gimple_build_call (t, 0);
4904 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4905 gsi_remove (&si, true);
4906 }
4907
4908 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4909 }
4910
4911
4912 /* Expand code for an OpenMP single directive. We've already expanded
4913 much of the code; here we simply place the GOMP_barrier call. */
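
/* For example (hypothetical user code),

     #pragma omp single copyprivate (x)
     x = init ();

   must keep its terminal barrier so the broadcast of X by the thread that
   executed the single body completes before the other threads use it; with
   nowait and no copyprivate clause the barrier below is omitted.  */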
4914
4915 static void
4916 expand_omp_single (struct omp_region *region)
4917 {
4918 basic_block entry_bb, exit_bb;
4919 gimple_stmt_iterator si;
4920 bool need_barrier = false;
4921
4922 entry_bb = region->entry;
4923 exit_bb = region->exit;
4924
4925 si = gsi_last_bb (entry_bb);
4926 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4927 be removed. We need to ensure that the thread that entered the single
4928 does not exit before the data is copied out by the other threads. */
4929 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4930 OMP_CLAUSE_COPYPRIVATE))
4931 need_barrier = true;
4932 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4933 gsi_remove (&si, true);
4934 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4935
4936 si = gsi_last_bb (exit_bb);
4937 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4938 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4939 false, GSI_SAME_STMT);
4940 gsi_remove (&si, true);
4941 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4942 }
4943
4944
4945 /* Generic expansion for OpenMP synchronization directives: master,
4946 ordered and critical. All we need to do here is remove the entry
4947 and exit markers for REGION. */
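
/* For example (hypothetical user code), by the time we get here

     #pragma omp critical
     x++;

   has already been lowered to, roughly,

     GIMPLE_OMP_CRITICAL
     GOMP_critical_start ();
     x = x + 1;
     GOMP_critical_end ();
     GIMPLE_OMP_RETURN

   so all that is left is deleting the two marker statements and turning
   their outgoing edges into plain fall-throughs.  */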
4948
4949 static void
4950 expand_omp_synch (struct omp_region *region)
4951 {
4952 basic_block entry_bb, exit_bb;
4953 gimple_stmt_iterator si;
4954
4955 entry_bb = region->entry;
4956 exit_bb = region->exit;
4957
4958 si = gsi_last_bb (entry_bb);
4959 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4960 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4961 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4962 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4963 gsi_remove (&si, true);
4964 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4965
4966 if (exit_bb)
4967 {
4968 si = gsi_last_bb (exit_bb);
4969 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4970 gsi_remove (&si, true);
4971 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4972 }
4973 }
4974
4975 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4976 operation as a normal volatile load. */
4977
4978 static bool
4979 expand_omp_atomic_load (basic_block load_bb, tree addr, tree loaded_val)
4980 {
4981 /* FIXME */
4982 (void) load_bb;
4983 (void) addr;
4984 (void) loaded_val;
4985 return false;
4986 }
4987
4988 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4989 operation as a normal volatile store. */
4990
4991 static bool
4992 expand_omp_atomic_store (basic_block load_bb, tree addr)
4993 {
4994 /* FIXME */
4995 (void) load_bb;
4996 (void) addr;
4997 return false;
4998 }
4999
5000 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5001 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
5002 size of the data type, and thus usable to find the index of the builtin
5003 decl. Returns false if the expression is not of the proper form. */
5004
5005 static bool
5006 expand_omp_atomic_fetch_op (basic_block load_bb,
5007 tree addr, tree loaded_val,
5008 tree stored_val, int index)
5009 {
5010 enum built_in_function oldbase, newbase, tmpbase;
5011 tree decl, itype, call;
5012 tree lhs, rhs;
5013 basic_block store_bb = single_succ (load_bb);
5014 gimple_stmt_iterator gsi;
5015 gimple stmt;
5016 location_t loc;
5017 enum tree_code code;
5018 bool need_old, need_new;
5019 enum machine_mode imode;
5020
5021 /* We expect to find the following sequences:
5022
5023 load_bb:
5024 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
5025
5026 store_bb:
5027 val = tmp OP something; (or: something OP tmp)
5028 GIMPLE_OMP_STORE (val)
5029
5030 ???FIXME: Allow a more flexible sequence.
5031 Perhaps use data flow to pick the statements.
5032
5033 */
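
  /* For instance (hypothetical user code),

       #pragma omp atomic
       x += 1;

     arrives here as

       load_bb:   GIMPLE_OMP_ATOMIC_LOAD (tmp, &x)
       store_bb:  val = tmp + 1;
                  GIMPLE_OMP_ATOMIC_STORE (val)

     and, for a 4-byte X, is matched to PLUS_EXPR below and emitted as
     __atomic_fetch_add_4 (&x, 1, MEMMODEL_RELAXED).  */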
5034
5035 gsi = gsi_after_labels (store_bb);
5036 stmt = gsi_stmt (gsi);
5037 loc = gimple_location (stmt);
5038 if (!is_gimple_assign (stmt))
5039 return false;
5040 gsi_next (&gsi);
5041 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
5042 return false;
5043 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
5044 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
5045 gcc_checking_assert (!need_old || !need_new);
5046
5047 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
5048 return false;
5049
5050 /* Check for one of the supported fetch-op operations. */
5051 code = gimple_assign_rhs_code (stmt);
5052 switch (code)
5053 {
5054 case PLUS_EXPR:
5055 case POINTER_PLUS_EXPR:
5056 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
5057 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
5058 break;
5059 case MINUS_EXPR:
5060 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
5061 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
5062 break;
5063 case BIT_AND_EXPR:
5064 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
5065 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
5066 break;
5067 case BIT_IOR_EXPR:
5068 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
5069 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
5070 break;
5071 case BIT_XOR_EXPR:
5072 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
5073 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
5074 break;
5075 default:
5076 return false;
5077 }
5078
5079 /* Make sure the expression is of the proper form. */
5080 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5081 rhs = gimple_assign_rhs2 (stmt);
5082 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5083 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5084 rhs = gimple_assign_rhs1 (stmt);
5085 else
5086 return false;
5087
5088 tmpbase = ((enum built_in_function)
5089 ((need_new ? newbase : oldbase) + index + 1));
5090 decl = builtin_decl_explicit (tmpbase);
5091 if (decl == NULL_TREE)
5092 return false;
5093 itype = TREE_TYPE (TREE_TYPE (decl));
5094 imode = TYPE_MODE (itype);
5095
5096 /* We could test all of the various optabs involved, but the fact of the
5097 matter is that (with the exception of i486 vs i586 and xadd) all targets
5098 that support any atomic operation optab also implement compare-and-swap.
5099 Let optabs.c take care of expanding any compare-and-swap loop. */
5100 if (!can_compare_and_swap_p (imode, true))
5101 return false;
5102
5103 gsi = gsi_last_bb (load_bb);
5104 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5105
5106 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
5107 It only requires that the operation happen atomically. Thus we can
5108 use the RELAXED memory model. */
5109 call = build_call_expr_loc (loc, decl, 3, addr,
5110 fold_convert_loc (loc, itype, rhs),
5111 build_int_cst (NULL, MEMMODEL_RELAXED));
5112
5113 if (need_old || need_new)
5114 {
5115 lhs = need_old ? loaded_val : stored_val;
5116 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
5117 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
5118 }
5119 else
5120 call = fold_convert_loc (loc, void_type_node, call);
5121 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5122 gsi_remove (&gsi, true);
5123
5124 gsi = gsi_last_bb (store_bb);
5125 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5126 gsi_remove (&gsi, true);
5127 gsi = gsi_last_bb (store_bb);
5128 gsi_remove (&gsi, true);
5129
5130 if (gimple_in_ssa_p (cfun))
5131 update_ssa (TODO_update_ssa_no_phi);
5132
5133 return true;
5134 }
5135
5136 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5137
5138 oldval = *addr;
5139 repeat:
5140 newval = rhs; // with oldval replacing *addr in rhs
5141 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5142 if (oldval != newval)
5143 goto repeat;
5144
5145 INDEX is log2 of the size of the data type, and thus usable to find the
5146 index of the builtin decl. */
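
/* For example (hypothetical user code), a floating-point update such as

     #pragma omp atomic
     f *= 2.0f;

   has no fetch-op builtin, so for a 4-byte float it becomes, roughly,

     loadedi = VIEW_CONVERT (unsigned int, f);
   repeat:
     newval  = VIEW_CONVERT (unsigned int,
                             VIEW_CONVERT (float, loadedi) * 2.0f);
     oldval  = __sync_val_compare_and_swap_4 ((unsigned int *) &f,
                                              loadedi, newval);
     if (oldval != loadedi)
       {
         loadedi = oldval;
         goto repeat;
       }
*/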
5147
5148 static bool
5149 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5150 tree addr, tree loaded_val, tree stored_val,
5151 int index)
5152 {
5153 tree loadedi, storedi, initial, new_storedi, old_vali;
5154 tree type, itype, cmpxchg, iaddr;
5155 gimple_stmt_iterator si;
5156 basic_block loop_header = single_succ (load_bb);
5157 gimple phi, stmt;
5158 edge e;
5159 enum built_in_function fncode;
5160
5161 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
5162 order to use the RELAXED memory model effectively. */
5163 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
5164 + index + 1);
5165 cmpxchg = builtin_decl_explicit (fncode);
5166 if (cmpxchg == NULL_TREE)
5167 return false;
5168 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5169 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5170
5171 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
5172 return false;
5173
5174 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5175 si = gsi_last_bb (load_bb);
5176 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5177
5178 /* For floating-point values, we'll need to view-convert them to integers
5179 so that we can perform the atomic compare and swap. Simplify the
5180 following code by always setting up the "i"ntegral variables. */
5181 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5182 {
5183 tree iaddr_val;
5184
5185 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5186 true), NULL);
5187 iaddr_val
5188 = force_gimple_operand_gsi (&si,
5189 fold_convert (TREE_TYPE (iaddr), addr),
5190 false, NULL_TREE, true, GSI_SAME_STMT);
5191 stmt = gimple_build_assign (iaddr, iaddr_val);
5192 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5193 loadedi = create_tmp_var (itype, NULL);
5194 if (gimple_in_ssa_p (cfun))
5195 {
5196 add_referenced_var (iaddr);
5197 add_referenced_var (loadedi);
5198 loadedi = make_ssa_name (loadedi, NULL);
5199 }
5200 }
5201 else
5202 {
5203 iaddr = addr;
5204 loadedi = loaded_val;
5205 }
5206
5207 initial
5208 = force_gimple_operand_gsi (&si,
5209 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5210 iaddr,
5211 build_int_cst (TREE_TYPE (iaddr), 0)),
5212 true, NULL_TREE, true, GSI_SAME_STMT);
5213
5214 /* Move the value to the LOADEDI temporary. */
5215 if (gimple_in_ssa_p (cfun))
5216 {
5217 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5218 phi = create_phi_node (loadedi, loop_header);
5219 SSA_NAME_DEF_STMT (loadedi) = phi;
5220 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5221 initial);
5222 }
5223 else
5224 gsi_insert_before (&si,
5225 gimple_build_assign (loadedi, initial),
5226 GSI_SAME_STMT);
5227 if (loadedi != loaded_val)
5228 {
5229 gimple_stmt_iterator gsi2;
5230 tree x;
5231
5232 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5233 gsi2 = gsi_start_bb (loop_header);
5234 if (gimple_in_ssa_p (cfun))
5235 {
5236 gimple stmt;
5237 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5238 true, GSI_SAME_STMT);
5239 stmt = gimple_build_assign (loaded_val, x);
5240 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5241 }
5242 else
5243 {
5244 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5245 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5246 true, GSI_SAME_STMT);
5247 }
5248 }
5249 gsi_remove (&si, true);
5250
5251 si = gsi_last_bb (store_bb);
5252 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5253
5254 if (iaddr == addr)
5255 storedi = stored_val;
5256 else
5257 storedi =
5258 force_gimple_operand_gsi (&si,
5259 build1 (VIEW_CONVERT_EXPR, itype,
5260 stored_val), true, NULL_TREE, true,
5261 GSI_SAME_STMT);
5262
5263 /* Build the compare&swap statement. */
5264 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5265 new_storedi = force_gimple_operand_gsi (&si,
5266 fold_convert (TREE_TYPE (loadedi),
5267 new_storedi),
5268 true, NULL_TREE,
5269 true, GSI_SAME_STMT);
5270
5271 if (gimple_in_ssa_p (cfun))
5272 old_vali = loadedi;
5273 else
5274 {
5275 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5276 if (gimple_in_ssa_p (cfun))
5277 add_referenced_var (old_vali);
5278 stmt = gimple_build_assign (old_vali, loadedi);
5279 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5280
5281 stmt = gimple_build_assign (loadedi, new_storedi);
5282 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5283 }
5284
5285 /* Note that we always perform the comparison as an integer, even for
5286 floating point. This allows the atomic operation to properly
5287 succeed even with NaNs and -0.0. */
5288 stmt = gimple_build_cond_empty
5289 (build2 (NE_EXPR, boolean_type_node,
5290 new_storedi, old_vali));
5291 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5292
5293 /* Update cfg. */
5294 e = single_succ_edge (store_bb);
5295 e->flags &= ~EDGE_FALLTHRU;
5296 e->flags |= EDGE_FALSE_VALUE;
5297
5298 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5299
5300 /* Copy the new value to loadedi (we already did that before the condition
5301 if we are not in SSA). */
5302 if (gimple_in_ssa_p (cfun))
5303 {
5304 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5305 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5306 }
5307
5308 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5309 gsi_remove (&si, true);
5310
5311 if (gimple_in_ssa_p (cfun))
5312 update_ssa (TODO_update_ssa_no_phi);
5313
5314 return true;
5315 }
5316
5317 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5318
5319 GOMP_atomic_start ();
5320 *addr = rhs;
5321 GOMP_atomic_end ();
5322
5323 The result is not globally atomic, but works so long as all parallel
5324 references are within #pragma omp atomic directives. According to
5325 responses received from omp@openmp.org, this appears to be within spec.
5326 Which makes sense, since that's how several other compilers handle
5327 this situation as well.
5328 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5329 expanding. STORED_VAL is the operand of the matching
5330 GIMPLE_OMP_ATOMIC_STORE.
5331
5332 We replace
5333 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5334 loaded_val = *addr;
5335
5336 and replace
5337 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5338 *addr = stored_val;
5339 */
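
/* For example (hypothetical user code), an update whose operand type has no
   usable compare-and-swap, say

     #pragma omp atomic
     ld += 1.0L;   // long double

   falls through to this routine and ends up, roughly, as

     GOMP_atomic_start ();
     loaded_val = *addr;
     stored_val = loaded_val + 1.0L;
     *addr = stored_val;
     GOMP_atomic_end ();
*/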
5340
5341 static bool
5342 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5343 tree addr, tree loaded_val, tree stored_val)
5344 {
5345 gimple_stmt_iterator si;
5346 gimple stmt;
5347 tree t;
5348
5349 si = gsi_last_bb (load_bb);
5350 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5351
5352 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
5353 t = build_call_expr (t, 0);
5354 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5355
5356 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5357 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5358 gsi_remove (&si, true);
5359
5360 si = gsi_last_bb (store_bb);
5361 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5362
5363 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5364 stored_val);
5365 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5366
5367 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
5368 t = build_call_expr (t, 0);
5369 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5370 gsi_remove (&si, true);
5371
5372 if (gimple_in_ssa_p (cfun))
5373 update_ssa (TODO_update_ssa_no_phi);
5374 return true;
5375 }
5376
5377 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
5378 using expand_omp_atomic_fetch_op. If that fails, we try to
5379 call expand_omp_atomic_pipeline, and if that fails too, the
5380 ultimate fallback is wrapping the operation in a mutex
5381 (expand_omp_atomic_mutex). REGION is the atomic region built
5382 by build_omp_regions_1(). */
5383
5384 static void
5385 expand_omp_atomic (struct omp_region *region)
5386 {
5387 basic_block load_bb = region->entry, store_bb = region->exit;
5388 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5389 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5390 tree addr = gimple_omp_atomic_load_rhs (load);
5391 tree stored_val = gimple_omp_atomic_store_val (store);
5392 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5393 HOST_WIDE_INT index;
5394
5395 /* Make sure the type is one of the supported sizes. */
5396 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5397 index = exact_log2 (index);
5398 if (index >= 0 && index <= 4)
5399 {
5400 unsigned int align = TYPE_ALIGN_UNIT (type);
5401
5402 /* __sync builtins require strict data alignment. */
5403 if (exact_log2 (align) >= index)
5404 {
5405 /* Atomic load. FIXME: have some target hook signal which loads
5406 are actually atomic? */
5407 if (loaded_val == stored_val
5408 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5409 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5410 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5411 && expand_omp_atomic_load (load_bb, addr, loaded_val))
5412 return;
5413
5414 /* Atomic store. FIXME: have some target hook signal which
5415 stores are actually atomic? */
5416 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5417 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5418 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5419 && store_bb == single_succ (load_bb)
5420 && first_stmt (store_bb) == store
5421 && expand_omp_atomic_store (load_bb, addr))
5422 return;
5423
5424 /* When possible, use specialized atomic update functions. */
5425 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5426 && store_bb == single_succ (load_bb))
5427 {
5428 if (expand_omp_atomic_fetch_op (load_bb, addr,
5429 loaded_val, stored_val, index))
5430 return;
5431 }
5432
5433 /* If we don't have specialized __sync builtins, try to implement
5434 it as a compare-and-swap loop. */
5435 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5436 loaded_val, stored_val, index))
5437 return;
5438 }
5439 }
5440
5441 /* The ultimate fallback is wrapping the operation in a mutex. */
5442 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5443 }
5444
5445
5446 /* Expand the parallel region tree rooted at REGION. Expansion
5447 proceeds in depth-first order. Innermost regions are expanded
5448 first. This way, parallel regions that require a new function to
5449 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5450 internal dependencies in their body. */
5451
5452 static void
5453 expand_omp (struct omp_region *region)
5454 {
5455 while (region)
5456 {
5457 location_t saved_location;
5458
5459 /* First, determine whether this is a combined parallel+workshare
5460 region. */
5461 if (region->type == GIMPLE_OMP_PARALLEL)
5462 determine_parallel_type (region);
5463
5464 if (region->inner)
5465 expand_omp (region->inner);
5466
5467 saved_location = input_location;
5468 if (gimple_has_location (last_stmt (region->entry)))
5469 input_location = gimple_location (last_stmt (region->entry));
5470
5471 switch (region->type)
5472 {
5473 case GIMPLE_OMP_PARALLEL:
5474 case GIMPLE_OMP_TASK:
5475 expand_omp_taskreg (region);
5476 break;
5477
5478 case GIMPLE_OMP_FOR:
5479 expand_omp_for (region);
5480 break;
5481
5482 case GIMPLE_OMP_SECTIONS:
5483 expand_omp_sections (region);
5484 break;
5485
5486 case GIMPLE_OMP_SECTION:
5487 /* Individual omp sections are handled together with their
5488 parent GIMPLE_OMP_SECTIONS region. */
5489 break;
5490
5491 case GIMPLE_OMP_SINGLE:
5492 expand_omp_single (region);
5493 break;
5494
5495 case GIMPLE_OMP_MASTER:
5496 case GIMPLE_OMP_ORDERED:
5497 case GIMPLE_OMP_CRITICAL:
5498 expand_omp_synch (region);
5499 break;
5500
5501 case GIMPLE_OMP_ATOMIC_LOAD:
5502 expand_omp_atomic (region);
5503 break;
5504
5505 default:
5506 gcc_unreachable ();
5507 }
5508
5509 input_location = saved_location;
5510 region = region->next;
5511 }
5512 }
5513
5514
5515 /* Helper for build_omp_regions. Scan the dominator tree starting at
5516 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5517 true, the function ends once a single tree is built (otherwise, the
5518 whole forest of OMP constructs may be built). */
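/* An illustrative example, not part of the original comment: for

     #pragma omp parallel
       {
         #pragma omp for
           ...
       }

   this builds a GIMPLE_OMP_PARALLEL region whose inner chain holds a
   GIMPLE_OMP_FOR region; each region's exit field is set to the block
   ending in the matching GIMPLE_OMP_RETURN.  */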
5519
5520 static void
5521 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5522 bool single_tree)
5523 {
5524 gimple_stmt_iterator gsi;
5525 gimple stmt;
5526 basic_block son;
5527
5528 gsi = gsi_last_bb (bb);
5529 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5530 {
5531 struct omp_region *region;
5532 enum gimple_code code;
5533
5534 stmt = gsi_stmt (gsi);
5535 code = gimple_code (stmt);
5536 if (code == GIMPLE_OMP_RETURN)
5537 {
5538 /* STMT is the return point out of region PARENT. Mark it
5539 as the exit point and make PARENT the immediately
5540 enclosing region. */
5541 gcc_assert (parent);
5542 region = parent;
5543 region->exit = bb;
5544 parent = parent->outer;
5545 }
5546 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5547 {
5548 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5549 GIMPLE_OMP_RETURN, but matches with
5550 GIMPLE_OMP_ATOMIC_LOAD. */
5551 gcc_assert (parent);
5552 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5553 region = parent;
5554 region->exit = bb;
5555 parent = parent->outer;
5556 }
5557
5558 else if (code == GIMPLE_OMP_CONTINUE)
5559 {
5560 gcc_assert (parent);
5561 parent->cont = bb;
5562 }
5563 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5564 {
5565 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5566 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5567 ;
5568 }
5569 else
5570 {
5571 /* Otherwise, this directive becomes the parent for a new
5572 region. */
5573 region = new_omp_region (bb, code, parent);
5574 parent = region;
5575 }
5576 }
5577
5578 if (single_tree && !parent)
5579 return;
5580
5581 for (son = first_dom_son (CDI_DOMINATORS, bb);
5582 son;
5583 son = next_dom_son (CDI_DOMINATORS, son))
5584 build_omp_regions_1 (son, parent, single_tree);
5585 }
5586
5587 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5588 root_omp_region. */
5589
5590 static void
5591 build_omp_regions_root (basic_block root)
5592 {
5593 gcc_assert (root_omp_region == NULL);
5594 build_omp_regions_1 (root, NULL, true);
5595 gcc_assert (root_omp_region != NULL);
5596 }
5597
5598 /* Expand the OMP construct (and its subconstructs) rooted at HEAD. */
5599
5600 void
5601 omp_expand_local (basic_block head)
5602 {
5603 build_omp_regions_root (head);
5604 if (dump_file && (dump_flags & TDF_DETAILS))
5605 {
5606 fprintf (dump_file, "\nOMP region tree\n\n");
5607 dump_omp_region (dump_file, root_omp_region, 0);
5608 fprintf (dump_file, "\n");
5609 }
5610
5611 remove_exit_barriers (root_omp_region);
5612 expand_omp (root_omp_region);
5613
5614 free_omp_regions ();
5615 }
5616
5617 /* Scan the CFG and build the tree (or forest) of OMP regions, storing
5618 the result in root_omp_region. */
5619
5620 static void
5621 build_omp_regions (void)
5622 {
5623 gcc_assert (root_omp_region == NULL);
5624 calculate_dominance_info (CDI_DOMINATORS);
5625 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5626 }
5627
5628 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5629
5630 static unsigned int
5631 execute_expand_omp (void)
5632 {
5633 build_omp_regions ();
5634
5635 if (!root_omp_region)
5636 return 0;
5637
5638 if (dump_file)
5639 {
5640 fprintf (dump_file, "\nOMP region tree\n\n");
5641 dump_omp_region (dump_file, root_omp_region, 0);
5642 fprintf (dump_file, "\n");
5643 }
5644
5645 remove_exit_barriers (root_omp_region);
5646
5647 expand_omp (root_omp_region);
5648
5649 cleanup_tree_cfg ();
5650
5651 free_omp_regions ();
5652
5653 return 0;
5654 }
5655
5656 /* OMP expansion -- the default pass, run before creation of SSA form. */
5657
5658 static bool
5659 gate_expand_omp (void)
5660 {
5661 return (flag_openmp != 0 && !seen_error ());
5662 }
5663
5664 struct gimple_opt_pass pass_expand_omp =
5665 {
5666 {
5667 GIMPLE_PASS,
5668 "ompexp", /* name */
5669 gate_expand_omp, /* gate */
5670 execute_expand_omp, /* execute */
5671 NULL, /* sub */
5672 NULL, /* next */
5673 0, /* static_pass_number */
5674 TV_NONE, /* tv_id */
5675 PROP_gimple_any, /* properties_required */
5676 0, /* properties_provided */
5677 0, /* properties_destroyed */
5678 0, /* todo_flags_start */
5679 0 /* todo_flags_finish */
5680 }
5681 };
5682 \f
5683 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5684
5685 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5686 CTX is the enclosing OMP context for the current statement. */
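/* An illustrative outline (not a verbatim dump) of the replacement bind
   assembled below:

     <ilist: private/firstprivate setup>
     GIMPLE_OMP_SECTIONS <clauses, control var .section>
     GIMPLE_OMP_SECTIONS_SWITCH
     bind {
       GIMPLE_OMP_SECTION  <section body>  GIMPLE_OMP_RETURN
         (one triple per section; lastprivate handling is appended
          to the last section)
     }
     GIMPLE_OMP_CONTINUE (.section, .section)
     <olist: reductions>  <dlist>
     GIMPLE_OMP_RETURN (nowait?)  */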
5687
5688 static void
5689 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5690 {
5691 tree block, control;
5692 gimple_stmt_iterator tgsi;
5693 unsigned i, len;
5694 gimple stmt, new_stmt, bind, t;
5695 gimple_seq ilist, dlist, olist, new_body, body;
5696 struct gimplify_ctx gctx;
5697
5698 stmt = gsi_stmt (*gsi_p);
5699
5700 push_gimplify_context (&gctx);
5701
5702 dlist = NULL;
5703 ilist = NULL;
5704 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5705 &ilist, &dlist, ctx);
5706
5707 tgsi = gsi_start (gimple_omp_body (stmt));
5708 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5709 continue;
5710
5711 tgsi = gsi_start (gimple_omp_body (stmt));
5712 body = NULL;
5713 for (i = 0; i < len; i++, gsi_next (&tgsi))
5714 {
5715 omp_context *sctx;
5716 gimple sec_start;
5717
5718 sec_start = gsi_stmt (tgsi);
5719 sctx = maybe_lookup_ctx (sec_start);
5720 gcc_assert (sctx);
5721
5722 gimple_seq_add_stmt (&body, sec_start);
5723
5724 lower_omp (gimple_omp_body (sec_start), sctx);
5725 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5726 gimple_omp_set_body (sec_start, NULL);
5727
5728 if (i == len - 1)
5729 {
5730 gimple_seq l = NULL;
5731 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5732 &l, ctx);
5733 gimple_seq_add_seq (&body, l);
5734 gimple_omp_section_set_last (sec_start);
5735 }
5736
5737 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5738 }
5739
5740 block = make_node (BLOCK);
5741 bind = gimple_build_bind (NULL, body, block);
5742
5743 olist = NULL;
5744 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5745
5746 block = make_node (BLOCK);
5747 new_stmt = gimple_build_bind (NULL, NULL, block);
5748
5749 pop_gimplify_context (new_stmt);
5750 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5751 BLOCK_VARS (block) = gimple_bind_vars (bind);
5752 if (BLOCK_VARS (block))
5753 TREE_USED (block) = 1;
5754
5755 new_body = NULL;
5756 gimple_seq_add_seq (&new_body, ilist);
5757 gimple_seq_add_stmt (&new_body, stmt);
5758 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5759 gimple_seq_add_stmt (&new_body, bind);
5760
5761 control = create_tmp_var (unsigned_type_node, ".section");
5762 t = gimple_build_omp_continue (control, control);
5763 gimple_omp_sections_set_control (stmt, control);
5764 gimple_seq_add_stmt (&new_body, t);
5765
5766 gimple_seq_add_seq (&new_body, olist);
5767 gimple_seq_add_seq (&new_body, dlist);
5768
5769 new_body = maybe_catch_exception (new_body);
5770
5771 t = gimple_build_omp_return
5772 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5773 OMP_CLAUSE_NOWAIT));
5774 gimple_seq_add_stmt (&new_body, t);
5775
5776 gimple_bind_set_body (new_stmt, new_body);
5777 gimple_omp_set_body (stmt, NULL);
5778
5779 gsi_replace (gsi_p, new_stmt, true);
5780 }
5781
5782
5783 /* A subroutine of lower_omp_single. Expand the simple form of
5784 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5785
5786 if (GOMP_single_start ())
5787 BODY;
5788 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5789
5790 FIXME. It may be better to delay expanding the logic of this until
5791 pass_expand_omp. The expanded logic may make the job more difficult
5792 for a synchronization analysis pass. */
5793
5794 static void
5795 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5796 {
5797 location_t loc = gimple_location (single_stmt);
5798 tree tlabel = create_artificial_label (loc);
5799 tree flabel = create_artificial_label (loc);
5800 gimple call, cond;
5801 tree lhs, decl;
5802
5803 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
5804 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5805 call = gimple_build_call (decl, 0);
5806 gimple_call_set_lhs (call, lhs);
5807 gimple_seq_add_stmt (pre_p, call);
5808
5809 cond = gimple_build_cond (EQ_EXPR, lhs,
5810 fold_convert_loc (loc, TREE_TYPE (lhs),
5811 boolean_true_node),
5812 tlabel, flabel);
5813 gimple_seq_add_stmt (pre_p, cond);
5814 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5815 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5816 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5817 }
5818
5819
5820 /* A subroutine of lower_omp_single. Expand the copyprivate form of
5821 a GIMPLE_OMP_SINGLE, i.e. one with a copyprivate clause:
5822
5823 #pragma omp single copyprivate (a, b, c)
5824
5825 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5826
5827 {
5828 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5829 {
5830 BODY;
5831 copyout.a = a;
5832 copyout.b = b;
5833 copyout.c = c;
5834 GOMP_single_copy_end (&copyout);
5835 }
5836 else
5837 {
5838 a = copyout_p->a;
5839 b = copyout_p->b;
5840 c = copyout_p->c;
5841 }
5842 GOMP_barrier ();
5843 }
5844
5845 FIXME. It may be better to delay expanding the logic of this until
5846 pass_expand_omp. The expanded logic may make the job more difficult
5847 for a synchronization analysis pass. */
5848
5849 static void
5850 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5851 {
5852 tree ptr_type, t, l0, l1, l2, bfn_decl;
5853 gimple_seq copyin_seq;
5854 location_t loc = gimple_location (single_stmt);
5855
5856 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5857
5858 ptr_type = build_pointer_type (ctx->record_type);
5859 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5860
5861 l0 = create_artificial_label (loc);
5862 l1 = create_artificial_label (loc);
5863 l2 = create_artificial_label (loc);
5864
5865 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
5866 t = build_call_expr_loc (loc, bfn_decl, 0);
5867 t = fold_convert_loc (loc, ptr_type, t);
5868 gimplify_assign (ctx->receiver_decl, t, pre_p);
5869
5870 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5871 build_int_cst (ptr_type, 0));
5872 t = build3 (COND_EXPR, void_type_node, t,
5873 build_and_jump (&l0), build_and_jump (&l1));
5874 gimplify_and_add (t, pre_p);
5875
5876 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5877
5878 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5879
5880 copyin_seq = NULL;
5881 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5882 &copyin_seq, ctx);
5883
5884 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5885 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
5886 t = build_call_expr_loc (loc, bfn_decl, 1, t);
5887 gimplify_and_add (t, pre_p);
5888
5889 t = build_and_jump (&l2);
5890 gimplify_and_add (t, pre_p);
5891
5892 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5893
5894 gimple_seq_add_seq (pre_p, copyin_seq);
5895
5896 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5897 }
5898
5899
5900 /* Expand code for an OpenMP single directive. */
5901
5902 static void
5903 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5904 {
5905 tree block;
5906 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5907 gimple_seq bind_body, dlist;
5908 struct gimplify_ctx gctx;
5909
5910 push_gimplify_context (&gctx);
5911
5912 bind_body = NULL;
5913 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5914 &bind_body, &dlist, ctx);
5915 lower_omp (gimple_omp_body (single_stmt), ctx);
5916
5917 gimple_seq_add_stmt (&bind_body, single_stmt);
5918
5919 if (ctx->record_type)
5920 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5921 else
5922 lower_omp_single_simple (single_stmt, &bind_body);
5923
5924 gimple_omp_set_body (single_stmt, NULL);
5925
5926 gimple_seq_add_seq (&bind_body, dlist);
5927
5928 bind_body = maybe_catch_exception (bind_body);
5929
5930 t = gimple_build_omp_return
5931 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5932 OMP_CLAUSE_NOWAIT));
5933 gimple_seq_add_stmt (&bind_body, t);
5934
5935 block = make_node (BLOCK);
5936 bind = gimple_build_bind (NULL, bind_body, block);
5937
5938 pop_gimplify_context (bind);
5939
5940 gimple_bind_append_vars (bind, ctx->block_vars);
5941 BLOCK_VARS (block) = ctx->block_vars;
5942 gsi_replace (gsi_p, bind, true);
5943 if (BLOCK_VARS (block))
5944 TREE_USED (block) = 1;
5945 }
5946
5947
5948 /* Expand code for an OpenMP master directive. */
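/* An illustrative sketch of the bind assembled below; it amounts to

     if (omp_get_thread_num () != 0) goto lab;
     BODY;
   lab:
     GIMPLE_OMP_RETURN (nowait);  */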
5949
5950 static void
5951 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5952 {
5953 tree block, lab = NULL, x, bfn_decl;
5954 gimple stmt = gsi_stmt (*gsi_p), bind;
5955 location_t loc = gimple_location (stmt);
5956 gimple_seq tseq;
5957 struct gimplify_ctx gctx;
5958
5959 push_gimplify_context (&gctx);
5960
5961 block = make_node (BLOCK);
5962 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5963 block);
5964
5965 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
5966 x = build_call_expr_loc (loc, bfn_decl, 0);
5967 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5968 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5969 tseq = NULL;
5970 gimplify_and_add (x, &tseq);
5971 gimple_bind_add_seq (bind, tseq);
5972
5973 lower_omp (gimple_omp_body (stmt), ctx);
5974 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5975 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5976 gimple_omp_set_body (stmt, NULL);
5977
5978 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5979
5980 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5981
5982 pop_gimplify_context (bind);
5983
5984 gimple_bind_append_vars (bind, ctx->block_vars);
5985 BLOCK_VARS (block) = ctx->block_vars;
5986 gsi_replace (gsi_p, bind, true);
5987 }
5988
5989
5990 /* Expand code for an OpenMP ordered directive. */
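/* An illustrative sketch of the bind assembled below; it amounts to

     GOMP_ordered_start ();
     BODY;
     GOMP_ordered_end ();
     GIMPLE_OMP_RETURN (nowait);  */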
5991
5992 static void
5993 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5994 {
5995 tree block;
5996 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5997 struct gimplify_ctx gctx;
5998
5999 push_gimplify_context (&gctx);
6000
6001 block = make_node (BLOCK);
6002 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
6003 block);
6004
6005 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
6006 0);
6007 gimple_bind_add_stmt (bind, x);
6008
6009 lower_omp (gimple_omp_body (stmt), ctx);
6010 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6011 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6012 gimple_omp_set_body (stmt, NULL);
6013
6014 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
6015 gimple_bind_add_stmt (bind, x);
6016
6017 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6018
6019 pop_gimplify_context (bind);
6020
6021 gimple_bind_append_vars (bind, ctx->block_vars);
6022 BLOCK_VARS (block) = gimple_bind_vars (bind);
6023 gsi_replace (gsi_p, bind, true);
6024 }
6025
6026
6027 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
6028 substitution of a couple of function calls. But in the NAMED case it
6029 requires that the languages coordinate a symbol name. It is therefore
6030 best put here in common code. */
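/* An illustrative sketch of the lowered forms produced below:

     unnamed:  GOMP_critical_start ();  BODY;  GOMP_critical_end ();

     named:    GOMP_critical_name_start (&.gomp_critical_user_NAME);
               BODY;
               GOMP_critical_name_end (&.gomp_critical_user_NAME);

   where .gomp_critical_user_NAME is the common symbol created below, so
   that every translation unit using the same critical name shares one
   lock.  */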
6031
6032 static GTY((param1_is (tree), param2_is (tree)))
6033 splay_tree critical_name_mutexes;
6034
6035 static void
6036 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6037 {
6038 tree block;
6039 tree name, lock, unlock;
6040 gimple stmt = gsi_stmt (*gsi_p), bind;
6041 location_t loc = gimple_location (stmt);
6042 gimple_seq tbody;
6043 struct gimplify_ctx gctx;
6044
6045 name = gimple_omp_critical_name (stmt);
6046 if (name)
6047 {
6048 tree decl;
6049 splay_tree_node n;
6050
6051 if (!critical_name_mutexes)
6052 critical_name_mutexes
6053 = splay_tree_new_ggc (splay_tree_compare_pointers,
6054 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
6055 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
6056
6057 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
6058 if (n == NULL)
6059 {
6060 char *new_str;
6061
6062 decl = create_tmp_var_raw (ptr_type_node, NULL);
6063
6064 new_str = ACONCAT ((".gomp_critical_user_",
6065 IDENTIFIER_POINTER (name), NULL));
6066 DECL_NAME (decl) = get_identifier (new_str);
6067 TREE_PUBLIC (decl) = 1;
6068 TREE_STATIC (decl) = 1;
6069 DECL_COMMON (decl) = 1;
6070 DECL_ARTIFICIAL (decl) = 1;
6071 DECL_IGNORED_P (decl) = 1;
6072 varpool_finalize_decl (decl);
6073
6074 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
6075 (splay_tree_value) decl);
6076 }
6077 else
6078 decl = (tree) n->value;
6079
6080 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
6081 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
6082
6083 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
6084 unlock = build_call_expr_loc (loc, unlock, 1,
6085 build_fold_addr_expr_loc (loc, decl));
6086 }
6087 else
6088 {
6089 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
6090 lock = build_call_expr_loc (loc, lock, 0);
6091
6092 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
6093 unlock = build_call_expr_loc (loc, unlock, 0);
6094 }
6095
6096 push_gimplify_context (&gctx);
6097
6098 block = make_node (BLOCK);
6099 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
6100
6101 tbody = gimple_bind_body (bind);
6102 gimplify_and_add (lock, &tbody);
6103 gimple_bind_set_body (bind, tbody);
6104
6105 lower_omp (gimple_omp_body (stmt), ctx);
6106 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6107 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6108 gimple_omp_set_body (stmt, NULL);
6109
6110 tbody = gimple_bind_body (bind);
6111 gimplify_and_add (unlock, &tbody);
6112 gimple_bind_set_body (bind, tbody);
6113
6114 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6115
6116 pop_gimplify_context (bind);
6117 gimple_bind_append_vars (bind, ctx->block_vars);
6118 BLOCK_VARS (block) = gimple_bind_vars (bind);
6119 gsi_replace (gsi_p, bind, true);
6120 }
6121
6122
6123 /* A subroutine of lower_omp_for. Generate code to emit the predicate
6124 for a lastprivate clause. Given a loop control predicate of (V
6125 cond N2), we gate the clause on (!(V cond N2)). The lowered form
6126 is appended to *DLIST, iterator initialization is appended to
6127 *BODY_P. */
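/* A worked example, for illustration only: given

     #pragma omp for lastprivate (x)
     for (i = 0; i < 16; i++) ...

   the control predicate is (i < 16) with step 1, so the lastprivate
   copy-out is gated on (i == 16), and i is pre-initialized (to 0 here) so
   that a thread executing no iterations cannot satisfy the gate by
   accident.  */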
6128
6129 static void
6130 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6131 gimple_seq *dlist, struct omp_context *ctx)
6132 {
6133 tree clauses, cond, vinit;
6134 enum tree_code cond_code;
6135 gimple_seq stmts;
6136
6137 cond_code = fd->loop.cond_code;
6138 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6139
6140 /* When possible, use a strict equality expression. This can let
6141 optimizations such as VRP deduce the value and remove a copy. */
6142 if (host_integerp (fd->loop.step, 0))
6143 {
6144 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6145 if (step == 1 || step == -1)
6146 cond_code = EQ_EXPR;
6147 }
6148
6149 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6150
6151 clauses = gimple_omp_for_clauses (fd->for_stmt);
6152 stmts = NULL;
6153 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6154 if (!gimple_seq_empty_p (stmts))
6155 {
6156 gimple_seq_add_seq (&stmts, *dlist);
6157 *dlist = stmts;
6158
6159 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6160 vinit = fd->loop.n1;
6161 if (cond_code == EQ_EXPR
6162 && host_integerp (fd->loop.n2, 0)
6163 && ! integer_zerop (fd->loop.n2))
6164 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6165
6166 /* Initialize the iterator variable, so that threads that don't execute
6167 any iterations don't execute the lastprivate clauses by accident. */
6168 gimplify_assign (fd->loop.v, vinit, body_p);
6169 }
6170 }
6171
6172
6173 /* Lower code for an OpenMP loop directive. */
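/* An illustrative outline (not a verbatim dump) of the bind assembled
   below:

     <ilist: private/firstprivate setup>  <pre-body>
     <lastprivate gate iterator init, if needed>
     GIMPLE_OMP_FOR <header lowered to temporaries>
       <loop body>
     GIMPLE_OMP_CONTINUE (V, V)
     <reduction merge>  <dlist>
     GIMPLE_OMP_RETURN (nowait?)  */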
6174
6175 static void
6176 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6177 {
6178 tree *rhs_p, block;
6179 struct omp_for_data fd;
6180 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6181 gimple_seq omp_for_body, body, dlist;
6182 size_t i;
6183 struct gimplify_ctx gctx;
6184
6185 push_gimplify_context (&gctx);
6186
6187 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6188 lower_omp (gimple_omp_body (stmt), ctx);
6189
6190 block = make_node (BLOCK);
6191 new_stmt = gimple_build_bind (NULL, NULL, block);
6192
6193 /* Move declarations of temporaries in the loop body before we make
6194 it go away. */
6195 omp_for_body = gimple_omp_body (stmt);
6196 if (!gimple_seq_empty_p (omp_for_body)
6197 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6198 {
6199 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6200 gimple_bind_append_vars (new_stmt, vars);
6201 }
6202
6203 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6204 dlist = NULL;
6205 body = NULL;
6206 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6207 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6208
6209 /* Lower the header expressions. At this point, we can assume that
6210 the header is of the form:
6211
6212 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6213
6214 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6215 using the .omp_data_s mapping, if needed. */
6216 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6217 {
6218 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6219 if (!is_gimple_min_invariant (*rhs_p))
6220 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6221
6222 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6223 if (!is_gimple_min_invariant (*rhs_p))
6224 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6225
6226 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6227 if (!is_gimple_min_invariant (*rhs_p))
6228 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6229 }
6230
6231 /* Once lowered, extract the bounds and clauses. */
6232 extract_omp_for_data (stmt, &fd, NULL);
6233
6234 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6235
6236 gimple_seq_add_stmt (&body, stmt);
6237 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6238
6239 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6240 fd.loop.v));
6241
6242 /* After the loop, add exit clauses. */
6243 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6244 gimple_seq_add_seq (&body, dlist);
6245
6246 body = maybe_catch_exception (body);
6247
6248 /* Region exit marker goes at the end of the loop body. */
6249 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6250
6251 pop_gimplify_context (new_stmt);
6252
6253 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6254 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6255 if (BLOCK_VARS (block))
6256 TREE_USED (block) = 1;
6257
6258 gimple_bind_set_body (new_stmt, body);
6259 gimple_omp_set_body (stmt, NULL);
6260 gimple_omp_for_set_pre_body (stmt, NULL);
6261 gsi_replace (gsi_p, new_stmt, true);
6262 }
6263
6264 /* Callback for walk_stmts. Check if the current statement only contains
6265 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
6266
6267 static tree
6268 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6269 bool *handled_ops_p,
6270 struct walk_stmt_info *wi)
6271 {
6272 int *info = (int *) wi->info;
6273 gimple stmt = gsi_stmt (*gsi_p);
6274
6275 *handled_ops_p = true;
6276 switch (gimple_code (stmt))
6277 {
6278 WALK_SUBSTMTS;
6279
6280 case GIMPLE_OMP_FOR:
6281 case GIMPLE_OMP_SECTIONS:
6282 *info = *info == 0 ? 1 : -1;
6283 break;
6284 default:
6285 *info = -1;
6286 break;
6287 }
6288 return NULL;
6289 }
6290
6291 struct omp_taskcopy_context
6292 {
6293 /* This field must be at the beginning, as we do "inheritance": Some
6294 callback functions for tree-inline.c (e.g., omp_copy_decl)
6295 receive a copy_body_data pointer that is up-casted to an
6296 omp_context pointer. */
6297 copy_body_data cb;
6298 omp_context *ctx;
6299 };
6300
6301 static tree
6302 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6303 {
6304 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6305
6306 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6307 return create_tmp_var (TREE_TYPE (var), NULL);
6308
6309 return var;
6310 }
6311
6312 static tree
6313 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6314 {
6315 tree name, new_fields = NULL, type, f;
6316
6317 type = lang_hooks.types.make_type (RECORD_TYPE);
6318 name = DECL_NAME (TYPE_NAME (orig_type));
6319 name = build_decl (gimple_location (tcctx->ctx->stmt),
6320 TYPE_DECL, name, type);
6321 TYPE_NAME (type) = name;
6322
6323 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6324 {
6325 tree new_f = copy_node (f);
6326 DECL_CONTEXT (new_f) = type;
6327 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6328 TREE_CHAIN (new_f) = new_fields;
6329 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6330 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6331 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6332 &tcctx->cb, NULL);
6333 new_fields = new_f;
6334 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6335 }
6336 TYPE_FIELDS (type) = nreverse (new_fields);
6337 layout_type (type);
6338 return type;
6339 }
6340
6341 /* Create task copyfn. */
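/* Summarizing the code below for the reader: the copy function receives a
   pointer to the destination record (ARG) and a pointer to the source
   record (SARG), and its body is assembled in three passes: initialize
   temporaries used in record sizes and field offsets, copy shared-variable
   pointers and copy-construct non-VLA firstprivate variables, and finally
   handle variable-sized firstprivate variables.  */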
6342
6343 static void
6344 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6345 {
6346 struct function *child_cfun;
6347 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6348 tree record_type, srecord_type, bind, list;
6349 bool record_needs_remap = false, srecord_needs_remap = false;
6350 splay_tree_node n;
6351 struct omp_taskcopy_context tcctx;
6352 struct gimplify_ctx gctx;
6353 location_t loc = gimple_location (task_stmt);
6354
6355 child_fn = gimple_omp_task_copy_fn (task_stmt);
6356 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6357 gcc_assert (child_cfun->cfg == NULL);
6358 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6359
6360 /* Reset DECL_CONTEXT on function arguments. */
6361 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
6362 DECL_CONTEXT (t) = child_fn;
6363
6364 /* Populate the function. */
6365 push_gimplify_context (&gctx);
6366 current_function_decl = child_fn;
6367
6368 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6369 TREE_SIDE_EFFECTS (bind) = 1;
6370 list = NULL;
6371 DECL_SAVED_TREE (child_fn) = bind;
6372 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6373
6374 /* Remap src and dst argument types if needed. */
6375 record_type = ctx->record_type;
6376 srecord_type = ctx->srecord_type;
6377 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6378 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6379 {
6380 record_needs_remap = true;
6381 break;
6382 }
6383 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
6384 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6385 {
6386 srecord_needs_remap = true;
6387 break;
6388 }
6389
6390 if (record_needs_remap || srecord_needs_remap)
6391 {
6392 memset (&tcctx, '\0', sizeof (tcctx));
6393 tcctx.cb.src_fn = ctx->cb.src_fn;
6394 tcctx.cb.dst_fn = child_fn;
6395 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
6396 gcc_checking_assert (tcctx.cb.src_node);
6397 tcctx.cb.dst_node = tcctx.cb.src_node;
6398 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6399 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6400 tcctx.cb.eh_lp_nr = 0;
6401 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6402 tcctx.cb.decl_map = pointer_map_create ();
6403 tcctx.ctx = ctx;
6404
6405 if (record_needs_remap)
6406 record_type = task_copyfn_remap_type (&tcctx, record_type);
6407 if (srecord_needs_remap)
6408 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6409 }
6410 else
6411 tcctx.cb.decl_map = NULL;
6412
6413 push_cfun (child_cfun);
6414
6415 arg = DECL_ARGUMENTS (child_fn);
6416 TREE_TYPE (arg) = build_pointer_type (record_type);
6417 sarg = DECL_CHAIN (arg);
6418 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6419
6420 /* First pass: initialize temporaries used in record_type and srecord_type
6421 sizes and field offsets. */
6422 if (tcctx.cb.decl_map)
6423 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6424 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6425 {
6426 tree *p;
6427
6428 decl = OMP_CLAUSE_DECL (c);
6429 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6430 if (p == NULL)
6431 continue;
6432 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6433 sf = (tree) n->value;
6434 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6435 src = build_simple_mem_ref_loc (loc, sarg);
6436 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6437 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6438 append_to_statement_list (t, &list);
6439 }
6440
6441 /* Second pass: copy shared var pointers and copy construct non-VLA
6442 firstprivate vars. */
6443 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6444 switch (OMP_CLAUSE_CODE (c))
6445 {
6446 case OMP_CLAUSE_SHARED:
6447 decl = OMP_CLAUSE_DECL (c);
6448 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6449 if (n == NULL)
6450 break;
6451 f = (tree) n->value;
6452 if (tcctx.cb.decl_map)
6453 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6454 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6455 sf = (tree) n->value;
6456 if (tcctx.cb.decl_map)
6457 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6458 src = build_simple_mem_ref_loc (loc, sarg);
6459 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6460 dst = build_simple_mem_ref_loc (loc, arg);
6461 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6462 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6463 append_to_statement_list (t, &list);
6464 break;
6465 case OMP_CLAUSE_FIRSTPRIVATE:
6466 decl = OMP_CLAUSE_DECL (c);
6467 if (is_variable_sized (decl))
6468 break;
6469 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6470 if (n == NULL)
6471 break;
6472 f = (tree) n->value;
6473 if (tcctx.cb.decl_map)
6474 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6475 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6476 if (n != NULL)
6477 {
6478 sf = (tree) n->value;
6479 if (tcctx.cb.decl_map)
6480 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6481 src = build_simple_mem_ref_loc (loc, sarg);
6482 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6483 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6484 src = build_simple_mem_ref_loc (loc, src);
6485 }
6486 else
6487 src = decl;
6488 dst = build_simple_mem_ref_loc (loc, arg);
6489 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6490 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6491 append_to_statement_list (t, &list);
6492 break;
6493 case OMP_CLAUSE_PRIVATE:
6494 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6495 break;
6496 decl = OMP_CLAUSE_DECL (c);
6497 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6498 f = (tree) n->value;
6499 if (tcctx.cb.decl_map)
6500 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6501 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6502 if (n != NULL)
6503 {
6504 sf = (tree) n->value;
6505 if (tcctx.cb.decl_map)
6506 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6507 src = build_simple_mem_ref_loc (loc, sarg);
6508 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6509 if (use_pointer_for_field (decl, NULL))
6510 src = build_simple_mem_ref_loc (loc, src);
6511 }
6512 else
6513 src = decl;
6514 dst = build_simple_mem_ref_loc (loc, arg);
6515 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6516 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6517 append_to_statement_list (t, &list);
6518 break;
6519 default:
6520 break;
6521 }
6522
6523 /* Last pass: handle VLA firstprivates. */
6524 if (tcctx.cb.decl_map)
6525 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6526 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6527 {
6528 tree ind, ptr, df;
6529
6530 decl = OMP_CLAUSE_DECL (c);
6531 if (!is_variable_sized (decl))
6532 continue;
6533 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6534 if (n == NULL)
6535 continue;
6536 f = (tree) n->value;
6537 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6538 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6539 ind = DECL_VALUE_EXPR (decl);
6540 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6541 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6542 n = splay_tree_lookup (ctx->sfield_map,
6543 (splay_tree_key) TREE_OPERAND (ind, 0));
6544 sf = (tree) n->value;
6545 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6546 src = build_simple_mem_ref_loc (loc, sarg);
6547 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6548 src = build_simple_mem_ref_loc (loc, src);
6549 dst = build_simple_mem_ref_loc (loc, arg);
6550 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6551 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6552 append_to_statement_list (t, &list);
6553 n = splay_tree_lookup (ctx->field_map,
6554 (splay_tree_key) TREE_OPERAND (ind, 0));
6555 df = (tree) n->value;
6556 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6557 ptr = build_simple_mem_ref_loc (loc, arg);
6558 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6559 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6560 build_fold_addr_expr_loc (loc, dst));
6561 append_to_statement_list (t, &list);
6562 }
6563
6564 t = build1 (RETURN_EXPR, void_type_node, NULL);
6565 append_to_statement_list (t, &list);
6566
6567 if (tcctx.cb.decl_map)
6568 pointer_map_destroy (tcctx.cb.decl_map);
6569 pop_gimplify_context (NULL);
6570 BIND_EXPR_BODY (bind) = list;
6571 pop_cfun ();
6572 current_function_decl = ctx->cb.src_fn;
6573 }
6574
6575 /* Lower the OpenMP parallel or task directive in the current statement
6576 in GSI_P. CTX holds context information for the directive. */
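/* An illustrative outline (not a verbatim dump) of the replacement
   assembled below:

     <ilist: send clauses, shared vars copied into .omp_data_o>
     GIMPLE_OMP_PARALLEL/TASK <clauses, data arg = .omp_data_o> {
       receiver_decl = &.omp_data_o;
       <par_ilist>  <directive body>  <par_olist>
       GIMPLE_OMP_RETURN
     }
     <olist: copy-back out of .omp_data_o>

   the whole thing wrapped in a GIMPLE_BIND that replaces the original
   statement.  */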
6577
6578 static void
6579 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6580 {
6581 tree clauses;
6582 tree child_fn, t;
6583 gimple stmt = gsi_stmt (*gsi_p);
6584 gimple par_bind, bind;
6585 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6586 struct gimplify_ctx gctx;
6587 location_t loc = gimple_location (stmt);
6588
6589 clauses = gimple_omp_taskreg_clauses (stmt);
6590 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6591 par_body = gimple_bind_body (par_bind);
6592 child_fn = ctx->cb.dst_fn;
6593 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6594 && !gimple_omp_parallel_combined_p (stmt))
6595 {
6596 struct walk_stmt_info wi;
6597 int ws_num = 0;
6598
6599 memset (&wi, 0, sizeof (wi));
6600 wi.info = &ws_num;
6601 wi.val_only = true;
6602 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6603 if (ws_num == 1)
6604 gimple_omp_parallel_set_combined_p (stmt, true);
6605 }
6606 if (ctx->srecord_type)
6607 create_task_copyfn (stmt, ctx);
6608
6609 push_gimplify_context (&gctx);
6610
6611 par_olist = NULL;
6612 par_ilist = NULL;
6613 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6614 lower_omp (par_body, ctx);
6615 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6616 lower_reduction_clauses (clauses, &par_olist, ctx);
6617
6618 /* Declare all the variables created by mapping and the variables
6619 declared in the scope of the parallel body. */
6620 record_vars_into (ctx->block_vars, child_fn);
6621 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6622
6623 if (ctx->record_type)
6624 {
6625 ctx->sender_decl
6626 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6627 : ctx->record_type, ".omp_data_o");
6628 DECL_NAMELESS (ctx->sender_decl) = 1;
6629 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6630 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6631 }
6632
6633 olist = NULL;
6634 ilist = NULL;
6635 lower_send_clauses (clauses, &ilist, &olist, ctx);
6636 lower_send_shared_vars (&ilist, &olist, ctx);
6637
6638 /* Once all the expansions are done, sequence all the different
6639 fragments inside gimple_omp_body. */
6640
6641 new_body = NULL;
6642
6643 if (ctx->record_type)
6644 {
6645 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6646 /* fixup_child_record_type might have changed receiver_decl's type. */
6647 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
6648 gimple_seq_add_stmt (&new_body,
6649 gimple_build_assign (ctx->receiver_decl, t));
6650 }
6651
6652 gimple_seq_add_seq (&new_body, par_ilist);
6653 gimple_seq_add_seq (&new_body, par_body);
6654 gimple_seq_add_seq (&new_body, par_olist);
6655 new_body = maybe_catch_exception (new_body);
6656 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6657 gimple_omp_set_body (stmt, new_body);
6658
6659 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6660 gimple_bind_add_stmt (bind, stmt);
6661 if (ilist || olist)
6662 {
6663 gimple_seq_add_stmt (&ilist, bind);
6664 gimple_seq_add_seq (&ilist, olist);
6665 bind = gimple_build_bind (NULL, ilist, NULL);
6666 }
6667
6668 gsi_replace (gsi_p, bind, true);
6669
6670 pop_gimplify_context (NULL);
6671 }
6672
6673 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6674 regimplified. If DATA is non-NULL, lower_omp_1 is outside
6675 of OpenMP context, but with task_shared_vars set. */
6676
6677 static tree
6678 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6679 void *data)
6680 {
6681 tree t = *tp;
6682
6683 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6684 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6685 return t;
6686
6687 if (task_shared_vars
6688 && DECL_P (t)
6689 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6690 return t;
6691
6692 /* If a global variable has been privatized, TREE_CONSTANT on
6693 ADDR_EXPR might be wrong. */
6694 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6695 recompute_tree_invariant_for_addr_expr (t);
6696
6697 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6698 return NULL_TREE;
6699 }
6700
6701 static void
6702 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6703 {
6704 gimple stmt = gsi_stmt (*gsi_p);
6705 struct walk_stmt_info wi;
6706
6707 if (gimple_has_location (stmt))
6708 input_location = gimple_location (stmt);
6709
6710 if (task_shared_vars)
6711 memset (&wi, '\0', sizeof (wi));
6712
6713 /* If we have issued syntax errors, avoid doing any heavy lifting.
6714 Just replace the OpenMP directives with a NOP to avoid
6715 confusing RTL expansion. */
6716 if (seen_error () && is_gimple_omp (stmt))
6717 {
6718 gsi_replace (gsi_p, gimple_build_nop (), true);
6719 return;
6720 }
6721
6722 switch (gimple_code (stmt))
6723 {
6724 case GIMPLE_COND:
6725 if ((ctx || task_shared_vars)
6726 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6727 ctx ? NULL : &wi, NULL)
6728 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6729 ctx ? NULL : &wi, NULL)))
6730 gimple_regimplify_operands (stmt, gsi_p);
6731 break;
6732 case GIMPLE_CATCH:
6733 lower_omp (gimple_catch_handler (stmt), ctx);
6734 break;
6735 case GIMPLE_EH_FILTER:
6736 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6737 break;
6738 case GIMPLE_TRY:
6739 lower_omp (gimple_try_eval (stmt), ctx);
6740 lower_omp (gimple_try_cleanup (stmt), ctx);
6741 break;
6742 case GIMPLE_BIND:
6743 lower_omp (gimple_bind_body (stmt), ctx);
6744 break;
6745 case GIMPLE_OMP_PARALLEL:
6746 case GIMPLE_OMP_TASK:
6747 ctx = maybe_lookup_ctx (stmt);
6748 lower_omp_taskreg (gsi_p, ctx);
6749 break;
6750 case GIMPLE_OMP_FOR:
6751 ctx = maybe_lookup_ctx (stmt);
6752 gcc_assert (ctx);
6753 lower_omp_for (gsi_p, ctx);
6754 break;
6755 case GIMPLE_OMP_SECTIONS:
6756 ctx = maybe_lookup_ctx (stmt);
6757 gcc_assert (ctx);
6758 lower_omp_sections (gsi_p, ctx);
6759 break;
6760 case GIMPLE_OMP_SINGLE:
6761 ctx = maybe_lookup_ctx (stmt);
6762 gcc_assert (ctx);
6763 lower_omp_single (gsi_p, ctx);
6764 break;
6765 case GIMPLE_OMP_MASTER:
6766 ctx = maybe_lookup_ctx (stmt);
6767 gcc_assert (ctx);
6768 lower_omp_master (gsi_p, ctx);
6769 break;
6770 case GIMPLE_OMP_ORDERED:
6771 ctx = maybe_lookup_ctx (stmt);
6772 gcc_assert (ctx);
6773 lower_omp_ordered (gsi_p, ctx);
6774 break;
6775 case GIMPLE_OMP_CRITICAL:
6776 ctx = maybe_lookup_ctx (stmt);
6777 gcc_assert (ctx);
6778 lower_omp_critical (gsi_p, ctx);
6779 break;
6780 case GIMPLE_OMP_ATOMIC_LOAD:
6781 if ((ctx || task_shared_vars)
6782 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6783 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6784 gimple_regimplify_operands (stmt, gsi_p);
6785 break;
6786 default:
6787 if ((ctx || task_shared_vars)
6788 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6789 ctx ? NULL : &wi))
6790 gimple_regimplify_operands (stmt, gsi_p);
6791 break;
6792 }
6793 }
6794
6795 static void
6796 lower_omp (gimple_seq body, omp_context *ctx)
6797 {
6798 location_t saved_location = input_location;
6799 gimple_stmt_iterator gsi;
6800 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6801 lower_omp_1 (&gsi, ctx);
6802 input_location = saved_location;
6803 }
6804 \f
6805 /* Main entry point. */
6806
6807 static unsigned int
6808 execute_lower_omp (void)
6809 {
6810 gimple_seq body;
6811
6812 /* This pass always runs, to provide PROP_gimple_lomp.
6813 But there is nothing to do unless -fopenmp is given. */
6814 if (flag_openmp == 0)
6815 return 0;
6816
6817 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6818 delete_omp_context);
6819
6820 body = gimple_body (current_function_decl);
6821 scan_omp (body, NULL);
6822 gcc_assert (taskreg_nesting_level == 0);
6823
6824 if (all_contexts->root)
6825 {
6826 struct gimplify_ctx gctx;
6827
6828 if (task_shared_vars)
6829 push_gimplify_context (&gctx);
6830 lower_omp (body, NULL);
6831 if (task_shared_vars)
6832 pop_gimplify_context (NULL);
6833 }
6834
6835 if (all_contexts)
6836 {
6837 splay_tree_delete (all_contexts);
6838 all_contexts = NULL;
6839 }
6840 BITMAP_FREE (task_shared_vars);
6841 return 0;
6842 }
6843
6844 struct gimple_opt_pass pass_lower_omp =
6845 {
6846 {
6847 GIMPLE_PASS,
6848 "omplower", /* name */
6849 NULL, /* gate */
6850 execute_lower_omp, /* execute */
6851 NULL, /* sub */
6852 NULL, /* next */
6853 0, /* static_pass_number */
6854 TV_NONE, /* tv_id */
6855 PROP_gimple_any, /* properties_required */
6856 PROP_gimple_lomp, /* properties_provided */
6857 0, /* properties_destroyed */
6858 0, /* todo_flags_start */
6859 0 /* todo_flags_finish */
6860 }
6861 };
6862 \f
6863 /* The following is a utility to diagnose OpenMP structured block violations.
6864 It is not part of the "omplower" pass, as that's invoked too late. It
6865 should be invoked by the respective front ends after gimplification. */
6866
6867 static splay_tree all_labels;
6868
6869 /* Check for mismatched contexts and generate an error if needed. Return
6870 true if an error is detected. */
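/* For example (illustrative only), given

     goto l;                  <-- BRANCH_CTX == NULL
     #pragma omp parallel
       { l:; }                <-- LABEL_CTX == the GIMPLE_OMP_PARALLEL

   the two contexts differ and BRANCH_CTX is NULL, so the more specific
   "invalid entry to OpenMP structured block" error is emitted.  */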
6871
6872 static bool
6873 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6874 gimple branch_ctx, gimple label_ctx)
6875 {
6876 if (label_ctx == branch_ctx)
6877 return false;
6878
6879
6880 /*
6881 Previously we kept track of the label's entire context in diagnose_sb_[12]
6882 so we could traverse it and issue a correct "exit" or "enter" error
6883 message upon a structured block violation.
6884
6885 We built the context by building a list with tree_cons'ing, but there is
6886 no easy counterpart in gimple tuples. It seems like far too much work
6887 for issuing exit/enter error messages. If someone really misses the
6888 distinct error message... patches welcome.
6889 */
6890
6891 #if 0
6892 /* Try to avoid confusing the user by producing an error message
6893 with correct "exit" or "enter" verbiage. We prefer "exit"
6894 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6895 if (branch_ctx == NULL)
6896 exit_p = false;
6897 else
6898 {
6899 while (label_ctx)
6900 {
6901 if (TREE_VALUE (label_ctx) == branch_ctx)
6902 {
6903 exit_p = false;
6904 break;
6905 }
6906 label_ctx = TREE_CHAIN (label_ctx);
6907 }
6908 }
6909
6910 if (exit_p)
6911 error ("invalid exit from OpenMP structured block");
6912 else
6913 error ("invalid entry to OpenMP structured block");
6914 #endif
6915
6916 /* If it's obvious we have an invalid entry, be specific about the error. */
6917 if (branch_ctx == NULL)
6918 error ("invalid entry to OpenMP structured block");
6919 else
6920 /* Otherwise, be vague and lazy, but efficient. */
6921 error ("invalid branch to/from an OpenMP structured block");
6922
6923 gsi_replace (gsi_p, gimple_build_nop (), false);
6924 return true;
6925 }
6926
6927 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6928 where each label is found. */
6929
6930 static tree
6931 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6932 struct walk_stmt_info *wi)
6933 {
6934 gimple context = (gimple) wi->info;
6935 gimple inner_context;
6936 gimple stmt = gsi_stmt (*gsi_p);
6937
6938 *handled_ops_p = true;
6939
6940 switch (gimple_code (stmt))
6941 {
6942 WALK_SUBSTMTS;
6943
6944 case GIMPLE_OMP_PARALLEL:
6945 case GIMPLE_OMP_TASK:
6946 case GIMPLE_OMP_SECTIONS:
6947 case GIMPLE_OMP_SINGLE:
6948 case GIMPLE_OMP_SECTION:
6949 case GIMPLE_OMP_MASTER:
6950 case GIMPLE_OMP_ORDERED:
6951 case GIMPLE_OMP_CRITICAL:
6952 /* The minimal context here is just the current OMP construct. */
6953 inner_context = stmt;
6954 wi->info = inner_context;
6955 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6956 wi->info = context;
6957 break;
6958
6959 case GIMPLE_OMP_FOR:
6960 inner_context = stmt;
6961 wi->info = inner_context;
6962 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6963 walk them. */
6964 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6965 diagnose_sb_1, NULL, wi);
6966 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6967 wi->info = context;
6968 break;
6969
6970 case GIMPLE_LABEL:
6971 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6972 (splay_tree_value) context);
6973 break;
6974
6975 default:
6976 break;
6977 }
6978
6979 return NULL_TREE;
6980 }
6981
6982 /* Pass 2: Check each branch and see if its context differs from the
6983 destination label's context. */
6984
6985 static tree
6986 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6987 struct walk_stmt_info *wi)
6988 {
6989 gimple context = (gimple) wi->info;
6990 splay_tree_node n;
6991 gimple stmt = gsi_stmt (*gsi_p);
6992
6993 *handled_ops_p = true;
6994
6995 switch (gimple_code (stmt))
6996 {
6997 WALK_SUBSTMTS;
6998
6999 case GIMPLE_OMP_PARALLEL:
7000 case GIMPLE_OMP_TASK:
7001 case GIMPLE_OMP_SECTIONS:
7002 case GIMPLE_OMP_SINGLE:
7003 case GIMPLE_OMP_SECTION:
7004 case GIMPLE_OMP_MASTER:
7005 case GIMPLE_OMP_ORDERED:
7006 case GIMPLE_OMP_CRITICAL:
7007 wi->info = stmt;
7008 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
7009 wi->info = context;
7010 break;
7011
7012 case GIMPLE_OMP_FOR:
7013 wi->info = stmt;
7014 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7015 walk them. */
7016 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
7017 diagnose_sb_2, NULL, wi);
7018 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
7019 wi->info = context;
7020 break;
7021
7022 case GIMPLE_COND:
7023 {
7024 tree lab = gimple_cond_true_label (stmt);
7025 if (lab)
7026 {
7027 n = splay_tree_lookup (all_labels,
7028 (splay_tree_key) lab);
7029 diagnose_sb_0 (gsi_p, context,
7030 n ? (gimple) n->value : NULL);
7031 }
7032 lab = gimple_cond_false_label (stmt);
7033 if (lab)
7034 {
7035 n = splay_tree_lookup (all_labels,
7036 (splay_tree_key) lab);
7037 diagnose_sb_0 (gsi_p, context,
7038 n ? (gimple) n->value : NULL);
7039 }
7040 }
7041 break;
7042
7043 case GIMPLE_GOTO:
7044 {
7045 tree lab = gimple_goto_dest (stmt);
7046 if (TREE_CODE (lab) != LABEL_DECL)
7047 break;
7048
7049 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7050 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
7051 }
7052 break;
7053
7054 case GIMPLE_SWITCH:
7055 {
7056 unsigned int i;
7057 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
7058 {
7059 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
7060 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7061 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
7062 break;
7063 }
7064 }
7065 break;
7066
7067 case GIMPLE_RETURN:
7068 diagnose_sb_0 (gsi_p, context, NULL);
7069 break;
7070
7071 default:
7072 break;
7073 }
7074
7075 return NULL_TREE;
7076 }
7077
7078 static unsigned int
7079 diagnose_omp_structured_block_errors (void)
7080 {
7081 struct walk_stmt_info wi;
7082 gimple_seq body = gimple_body (current_function_decl);
7083
7084 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
7085
7086 memset (&wi, 0, sizeof (wi));
7087 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
7088
7089 memset (&wi, 0, sizeof (wi));
7090 wi.want_locations = true;
7091 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
7092
7093 splay_tree_delete (all_labels);
7094 all_labels = NULL;
7095
7096 return 0;
7097 }
7098
7099 static bool
7100 gate_diagnose_omp_blocks (void)
7101 {
7102 return flag_openmp != 0;
7103 }
7104
7105 struct gimple_opt_pass pass_diagnose_omp_blocks =
7106 {
7107 {
7108 GIMPLE_PASS,
7109 "*diagnose_omp_blocks", /* name */
7110 gate_diagnose_omp_blocks, /* gate */
7111 diagnose_omp_structured_block_errors, /* execute */
7112 NULL, /* sub */
7113 NULL, /* next */
7114 0, /* static_pass_number */
7115 TV_NONE, /* tv_id */
7116 PROP_gimple_any, /* properties_required */
7117 0, /* properties_provided */
7118 0, /* properties_destroyed */
7119 0, /* todo_flags_start */
7120 0, /* todo_flags_finish */
7121 }
7122 };
7123
7124 #include "gt-omp-low.h"