gcc/omp-low.c
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005-2013 Free Software Foundation, Inc.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "gimple.h"
31 #include "tree-iterator.h"
32 #include "tree-inline.h"
33 #include "langhooks.h"
34 #include "diagnostic-core.h"
35 #include "tree-flow.h"
36 #include "flags.h"
37 #include "function.h"
38 #include "expr.h"
39 #include "tree-pass.h"
40 #include "ggc.h"
41 #include "except.h"
42 #include "splay-tree.h"
43 #include "optabs.h"
44 #include "cfgloop.h"
45
46
47 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
48 phases. The first phase scans the function looking for OMP statements
49 and then for variables that must be replaced to satisfy data sharing
50 clauses. The second phase expands code for the constructs, as well as
51 re-gimplifying things when variables have been replaced with complex
52 expressions.
53
54 Final code generation is done by pass_expand_omp. The flowgraph is
55 scanned for parallel regions which are then moved to a new
56 function, to be invoked by the thread library. */
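/* Illustrative sketch only (not part of the pass itself): conceptually,
   a directive such as

       int x;
       #pragma omp parallel shared (x)
         use (&x);

   ends up outlined into a child function of roughly the form

       void foo._omp_fn.0 (void *.omp_data_i);

   where the shared variable is communicated through a compiler-generated
   ".omp_data_s" record whose address is passed as .omp_data_i, and the
   original call site fills in a matching ".omp_data_o" object before
   invoking the child through the libgomp runtime.  The exact record layout
   and runtime entry points are determined by the code below; the names
   here just mirror the temporaries this file creates.  */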
57
58 /* Context structure. Used to store information about each parallel
59 directive in the code. */
60
61 typedef struct omp_context
62 {
63 /* This field must be at the beginning, as we do "inheritance": Some
64 callback functions for tree-inline.c (e.g., omp_copy_decl)
65 receive a copy_body_data pointer that is up-casted to an
66 omp_context pointer. */
67 copy_body_data cb;
68
69 /* The tree of contexts corresponding to the encountered constructs. */
70 struct omp_context *outer;
71 gimple stmt;
72
73 /* Map variables to fields in a structure that allows communication
74 between sending and receiving threads. */
75 splay_tree field_map;
76 tree record_type;
77 tree sender_decl;
78 tree receiver_decl;
79
80 /* These are used just by task contexts, if task firstprivate fn is
81 needed. srecord_type is used to communicate from the thread
82 that encountered the task construct to task firstprivate fn;
83 record_type is allocated by GOMP_task, initialized by task firstprivate
84 fn and passed to the task body fn. */
85 splay_tree sfield_map;
86 tree srecord_type;
87
88 /* A chain of variables to add to the top-level block surrounding the
89 construct. In the case of a parallel, this is in the child function. */
90 tree block_vars;
91
92 /* What to do with variables with implicitly determined sharing
93 attributes. */
94 enum omp_clause_default_kind default_kind;
95
96 /* Nesting depth of this context. Used to beautify error messages re
97 invalid gotos. The outermost ctx is depth 1, with depth 0 being
98 reserved for the main body of the function. */
99 int depth;
100
101 /* True if this parallel directive is nested within another. */
102 bool is_nested;
103 } omp_context;
104
105
106 struct omp_for_data_loop
107 {
108 tree v, n1, n2, step;
109 enum tree_code cond_code;
110 };
111
112 /* A structure describing the main elements of a parallel loop. */
113
114 struct omp_for_data
115 {
116 struct omp_for_data_loop loop;
117 tree chunk_size;
118 gimple for_stmt;
119 tree pre, iter_type;
120 int collapse;
121 bool have_nowait, have_ordered;
122 enum omp_clause_schedule_kind sched_kind;
123 struct omp_for_data_loop *loops;
124 };
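/* For orientation (derived from extract_omp_for_data below): a canonical
   loop such as

       #pragma omp for
       for (i = 0; i < n; i += 4)

   fills in loop.v = i, loop.n1 = 0, loop.n2 = n, loop.step = 4 and
   loop.cond_code = LT_EXPR; COLLAPSE is 1 unless a collapse(N) clause
   folds several nested loops into a single logical iteration space, in
   which case LOOPS points to one omp_for_data_loop per original loop.  */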
125
126
127 static splay_tree all_contexts;
128 static int taskreg_nesting_level;
129 struct omp_region *root_omp_region;
130 static bitmap task_shared_vars;
131
132 static void scan_omp (gimple_seq *, omp_context *);
133 static tree scan_omp_1_op (tree *, int *, void *);
134
135 #define WALK_SUBSTMTS \
136 case GIMPLE_BIND: \
137 case GIMPLE_TRY: \
138 case GIMPLE_CATCH: \
139 case GIMPLE_EH_FILTER: \
140 case GIMPLE_TRANSACTION: \
141 /* The sub-statements for these should be walked. */ \
142 *handled_ops_p = false; \
143 break;
144
145 /* Convenience function for calling scan_omp_1_op on tree operands. */
146
147 static inline tree
148 scan_omp_op (tree *tp, omp_context *ctx)
149 {
150 struct walk_stmt_info wi;
151
152 memset (&wi, 0, sizeof (wi));
153 wi.info = ctx;
154 wi.want_locations = true;
155
156 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
157 }
158
159 static void lower_omp (gimple_seq *, omp_context *);
160 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
161 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
162
163 /* Find an OpenMP clause of type KIND within CLAUSES. */
164
165 tree
166 find_omp_clause (tree clauses, enum omp_clause_code kind)
167 {
168 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
169 if (OMP_CLAUSE_CODE (clauses) == kind)
170 return clauses;
171
172 return NULL_TREE;
173 }
174
175 /* Return true if CTX is for an omp parallel. */
176
177 static inline bool
178 is_parallel_ctx (omp_context *ctx)
179 {
180 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
181 }
182
183
184 /* Return true if CTX is for an omp task. */
185
186 static inline bool
187 is_task_ctx (omp_context *ctx)
188 {
189 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
190 }
191
192
193 /* Return true if CTX is for an omp parallel or omp task. */
194
195 static inline bool
196 is_taskreg_ctx (omp_context *ctx)
197 {
198 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
199 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
200 }
201
202
203 /* Return true if REGION is a combined parallel+workshare region. */
204
205 static inline bool
206 is_combined_parallel (struct omp_region *region)
207 {
208 return region->is_combined_parallel;
209 }
210
211
212 /* Extract the header elements of parallel loop FOR_STMT and store
213 them into *FD. */
214
215 static void
216 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
217 struct omp_for_data_loop *loops)
218 {
219 tree t, var, *collapse_iter, *collapse_count;
220 tree count = NULL_TREE, iter_type = long_integer_type_node;
221 struct omp_for_data_loop *loop;
222 int i;
223 struct omp_for_data_loop dummy_loop;
224 location_t loc = gimple_location (for_stmt);
225
226 fd->for_stmt = for_stmt;
227 fd->pre = NULL;
228 fd->collapse = gimple_omp_for_collapse (for_stmt);
229 if (fd->collapse > 1)
230 fd->loops = loops;
231 else
232 fd->loops = &fd->loop;
233
234 fd->have_nowait = fd->have_ordered = false;
235 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
236 fd->chunk_size = NULL_TREE;
237 collapse_iter = NULL;
238 collapse_count = NULL;
239
240 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
241 switch (OMP_CLAUSE_CODE (t))
242 {
243 case OMP_CLAUSE_NOWAIT:
244 fd->have_nowait = true;
245 break;
246 case OMP_CLAUSE_ORDERED:
247 fd->have_ordered = true;
248 break;
249 case OMP_CLAUSE_SCHEDULE:
250 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
251 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
252 break;
253 case OMP_CLAUSE_COLLAPSE:
254 if (fd->collapse > 1)
255 {
256 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
257 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
258 }
259 default:
260 break;
261 }
262
263 /* FIXME: for now map schedule(auto) to schedule(static).
264 There should be analysis to determine whether all iterations
265 are approximately the same amount of work (then schedule(static)
266 is best) or if it varies (then schedule(dynamic,N) is better). */
267 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
268 {
269 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
270 gcc_assert (fd->chunk_size == NULL);
271 }
272 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
273 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
274 gcc_assert (fd->chunk_size == NULL);
275 else if (fd->chunk_size == NULL)
276 {
277 /* We only need to compute a default chunk size for ordered
278 static loops and dynamic loops. */
279 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
280 || fd->have_ordered
281 || fd->collapse > 1)
282 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
283 ? integer_zero_node : integer_one_node;
284 }
285
286 for (i = 0; i < fd->collapse; i++)
287 {
288 if (fd->collapse == 1)
289 loop = &fd->loop;
290 else if (loops != NULL)
291 loop = loops + i;
292 else
293 loop = &dummy_loop;
294
295
296 loop->v = gimple_omp_for_index (for_stmt, i);
297 gcc_assert (SSA_VAR_P (loop->v));
298 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
299 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
300 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
301 loop->n1 = gimple_omp_for_initial (for_stmt, i);
302
303 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
304 loop->n2 = gimple_omp_for_final (for_stmt, i);
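      /* The switch below canonicalizes the condition to LT_EXPR or GT_EXPR
         by adjusting the bound: "i <= n2" becomes "i < n2 + 1" and
         "i >= n2" becomes "i > n2 - 1" (using an offset of +/-1 for
         pointer-typed bounds).  */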
305 switch (loop->cond_code)
306 {
307 case LT_EXPR:
308 case GT_EXPR:
309 break;
310 case LE_EXPR:
311 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
312 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
313 else
314 loop->n2 = fold_build2_loc (loc,
315 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
316 build_int_cst (TREE_TYPE (loop->n2), 1));
317 loop->cond_code = LT_EXPR;
318 break;
319 case GE_EXPR:
320 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
321 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
322 else
323 loop->n2 = fold_build2_loc (loc,
324 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
325 build_int_cst (TREE_TYPE (loop->n2), 1));
326 loop->cond_code = GT_EXPR;
327 break;
328 default:
329 gcc_unreachable ();
330 }
331
332 t = gimple_omp_for_incr (for_stmt, i);
333 gcc_assert (TREE_OPERAND (t, 0) == var);
334 switch (TREE_CODE (t))
335 {
336 case PLUS_EXPR:
337 loop->step = TREE_OPERAND (t, 1);
338 break;
339 case POINTER_PLUS_EXPR:
340 loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
341 break;
342 case MINUS_EXPR:
343 loop->step = TREE_OPERAND (t, 1);
344 loop->step = fold_build1_loc (loc,
345 NEGATE_EXPR, TREE_TYPE (loop->step),
346 loop->step);
347 break;
348 default:
349 gcc_unreachable ();
350 }
351
352 if (iter_type != long_long_unsigned_type_node)
353 {
354 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
355 iter_type = long_long_unsigned_type_node;
356 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
357 && TYPE_PRECISION (TREE_TYPE (loop->v))
358 >= TYPE_PRECISION (iter_type))
359 {
360 tree n;
361
362 if (loop->cond_code == LT_EXPR)
363 n = fold_build2_loc (loc,
364 PLUS_EXPR, TREE_TYPE (loop->v),
365 loop->n2, loop->step);
366 else
367 n = loop->n1;
368 if (TREE_CODE (n) != INTEGER_CST
369 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
370 iter_type = long_long_unsigned_type_node;
371 }
372 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
373 > TYPE_PRECISION (iter_type))
374 {
375 tree n1, n2;
376
377 if (loop->cond_code == LT_EXPR)
378 {
379 n1 = loop->n1;
380 n2 = fold_build2_loc (loc,
381 PLUS_EXPR, TREE_TYPE (loop->v),
382 loop->n2, loop->step);
383 }
384 else
385 {
386 n1 = fold_build2_loc (loc,
387 MINUS_EXPR, TREE_TYPE (loop->v),
388 loop->n2, loop->step);
389 n2 = loop->n1;
390 }
391 if (TREE_CODE (n1) != INTEGER_CST
392 || TREE_CODE (n2) != INTEGER_CST
393 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
394 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
395 iter_type = long_long_unsigned_type_node;
396 }
397 }
398
399 if (collapse_count && *collapse_count == NULL)
400 {
401 if ((i == 0 || count != NULL_TREE)
402 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
403 && TREE_CONSTANT (loop->n1)
404 && TREE_CONSTANT (loop->n2)
405 && TREE_CODE (loop->step) == INTEGER_CST)
406 {
407 tree itype = TREE_TYPE (loop->v);
408
409 if (POINTER_TYPE_P (itype))
410 itype = signed_type_for (itype);
411 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
412 t = fold_build2_loc (loc,
413 PLUS_EXPR, itype,
414 fold_convert_loc (loc, itype, loop->step), t);
415 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
416 fold_convert_loc (loc, itype, loop->n2));
417 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
418 fold_convert_loc (loc, itype, loop->n1));
419 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
420 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
421 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
422 fold_build1_loc (loc, NEGATE_EXPR, itype,
423 fold_convert_loc (loc, itype,
424 loop->step)));
425 else
426 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
427 fold_convert_loc (loc, itype, loop->step));
428 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
429 if (count != NULL_TREE)
430 count = fold_build2_loc (loc,
431 MULT_EXPR, long_long_unsigned_type_node,
432 count, t);
433 else
434 count = t;
435 if (TREE_CODE (count) != INTEGER_CST)
436 count = NULL_TREE;
437 }
438 else
439 count = NULL_TREE;
440 }
441 }
442
443 if (count)
444 {
445 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
446 iter_type = long_long_unsigned_type_node;
447 else
448 iter_type = long_integer_type_node;
449 }
450 else if (collapse_iter && *collapse_iter != NULL)
451 iter_type = TREE_TYPE (*collapse_iter);
452 fd->iter_type = iter_type;
453 if (collapse_iter && *collapse_iter == NULL)
454 *collapse_iter = create_tmp_var (iter_type, ".iter");
455 if (collapse_count && *collapse_count == NULL)
456 {
457 if (count)
458 *collapse_count = fold_convert_loc (loc, iter_type, count);
459 else
460 *collapse_count = create_tmp_var (iter_type, ".count");
461 }
462
463 if (fd->collapse > 1)
464 {
465 fd->loop.v = *collapse_iter;
466 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
467 fd->loop.n2 = *collapse_count;
468 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
469 fd->loop.cond_code = LT_EXPR;
470 }
471 }
472
473
474 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
475 is the immediate dominator of PAR_ENTRY_BB, return true if there
476 are no data dependencies that would prevent expanding the parallel
477 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
478
479 When expanding a combined parallel+workshare region, the call to
480 the child function may need additional arguments in the case of
481 GIMPLE_OMP_FOR regions. In some cases, these arguments are
482 computed out of variables passed in from the parent to the child
483 via 'struct .omp_data_s'. For instance:
484
485 #pragma omp parallel for schedule (guided, i * 4)
486 for (j ...)
487
488 Is lowered into:
489
490 # BLOCK 2 (PAR_ENTRY_BB)
491 .omp_data_o.i = i;
492 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
493
494 # BLOCK 3 (WS_ENTRY_BB)
495 .omp_data_i = &.omp_data_o;
496 D.1667 = .omp_data_i->i;
497 D.1598 = D.1667 * 4;
498 #pragma omp for schedule (guided, D.1598)
499
500 When we outline the parallel region, the call to the child function
501 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
502 that value is computed *after* the call site. So, in principle we
503 cannot do the transformation.
504
505 To see whether the code in WS_ENTRY_BB blocks the combined
506 parallel+workshare call, we collect all the variables used in the
507 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
508 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
509 call.
510
511 FIXME. If we had the SSA form built at this point, we could merely
512 hoist the code in block 3 into block 2 and be done with it. But at
513 this point we don't have dataflow information and though we could
514 hack something up here, it is really not worth the aggravation. */
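/* Conversely (illustration only), something like

       #pragma omp parallel for schedule (dynamic, 4)
       for (j = 0; j < 1024; j++)

   has invariant bounds, step and chunk size, so nothing needs to be
   computed in WS_ENTRY_BB and the combined parallel+workshare call can be
   emitted; whether it actually is also depends on the schedule-kind and
   ordered checks in determine_parallel_type below.  */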
515
516 static bool
517 workshare_safe_to_combine_p (basic_block ws_entry_bb)
518 {
519 struct omp_for_data fd;
520 gimple ws_stmt = last_stmt (ws_entry_bb);
521
522 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
523 return true;
524
525 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
526
527 extract_omp_for_data (ws_stmt, &fd, NULL);
528
529 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
530 return false;
531 if (fd.iter_type != long_integer_type_node)
532 return false;
533
534 /* FIXME. We give up too easily here. If any of these arguments
535 are not constants, they will likely involve variables that have
536 been mapped into fields of .omp_data_s for sharing with the child
537 function. With appropriate data flow, it would be possible to
538 see through this. */
539 if (!is_gimple_min_invariant (fd.loop.n1)
540 || !is_gimple_min_invariant (fd.loop.n2)
541 || !is_gimple_min_invariant (fd.loop.step)
542 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
543 return false;
544
545 return true;
546 }
547
548
549 /* Collect additional arguments needed to emit a combined
550 parallel+workshare call. WS_STMT is the workshare directive being
551 expanded. */
552
553 static vec<tree, va_gc> *
554 get_ws_args_for (gimple ws_stmt)
555 {
556 tree t;
557 location_t loc = gimple_location (ws_stmt);
558 vec<tree, va_gc> *ws_args;
559
560 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
561 {
562 struct omp_for_data fd;
563
564 extract_omp_for_data (ws_stmt, &fd, NULL);
565
566 vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
567
568 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
569 ws_args->quick_push (t);
570
571 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
572 ws_args->quick_push (t);
573
574 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
575 ws_args->quick_push (t);
576
577 if (fd.chunk_size)
578 {
579 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
580 ws_args->quick_push (t);
581 }
582
583 return ws_args;
584 }
585 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
586 {
587 /* Number of sections is equal to the number of edges from the
588 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
589 the exit of the sections region. */
590 basic_block bb = single_succ (gimple_bb (ws_stmt));
591 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
592 vec_alloc (ws_args, 1);
593 ws_args->quick_push (t);
594 return ws_args;
595 }
596
597 gcc_unreachable ();
598 }
599
600
601 /* Discover whether REGION is a combined parallel+workshare region. */
602
603 static void
604 determine_parallel_type (struct omp_region *region)
605 {
606 basic_block par_entry_bb, par_exit_bb;
607 basic_block ws_entry_bb, ws_exit_bb;
608
609 if (region == NULL || region->inner == NULL
610 || region->exit == NULL || region->inner->exit == NULL
611 || region->inner->cont == NULL)
612 return;
613
614 /* We only support parallel+for and parallel+sections. */
615 if (region->type != GIMPLE_OMP_PARALLEL
616 || (region->inner->type != GIMPLE_OMP_FOR
617 && region->inner->type != GIMPLE_OMP_SECTIONS))
618 return;
619
620 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
621 WS_EXIT_BB -> PAR_EXIT_BB. */
622 par_entry_bb = region->entry;
623 par_exit_bb = region->exit;
624 ws_entry_bb = region->inner->entry;
625 ws_exit_bb = region->inner->exit;
626
627 if (single_succ (par_entry_bb) == ws_entry_bb
628 && single_succ (ws_exit_bb) == par_exit_bb
629 && workshare_safe_to_combine_p (ws_entry_bb)
630 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
631 || (last_and_only_stmt (ws_entry_bb)
632 && last_and_only_stmt (par_exit_bb))))
633 {
634 gimple ws_stmt = last_stmt (ws_entry_bb);
635
636 if (region->inner->type == GIMPLE_OMP_FOR)
637 {
638 /* If this is a combined parallel loop, we need to determine
639 whether or not to use the combined library calls. There
640 are two cases where we do not apply the transformation:
641 static loops and any kind of ordered loop. In the first
642 case, we already open code the loop so there is no need
643 to do anything else. In the latter case, the combined
644 parallel loop call would still need extra synchronization
645 to implement ordered semantics, so there would not be any
646 gain in using the combined call. */
647 tree clauses = gimple_omp_for_clauses (ws_stmt);
648 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
649 if (c == NULL
650 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
651 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
652 {
653 region->is_combined_parallel = false;
654 region->inner->is_combined_parallel = false;
655 return;
656 }
657 }
658
659 region->is_combined_parallel = true;
660 region->inner->is_combined_parallel = true;
661 region->ws_args = get_ws_args_for (ws_stmt);
662 }
663 }
664
665
666 /* Return true if EXPR is variable sized. */
667
668 static inline bool
669 is_variable_sized (const_tree expr)
670 {
671 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
672 }
673
674 /* Return true if DECL is a reference type. */
675
676 static inline bool
677 is_reference (tree decl)
678 {
679 return lang_hooks.decls.omp_privatize_by_reference (decl);
680 }
681
682 /* Lookup variables in the decl or field splay trees. The "maybe" form
683 allows the variable not to have been entered; otherwise we assert
684 that it has been entered. */
685
686 static inline tree
687 lookup_decl (tree var, omp_context *ctx)
688 {
689 tree *n;
690 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
691 return *n;
692 }
693
694 static inline tree
695 maybe_lookup_decl (const_tree var, omp_context *ctx)
696 {
697 tree *n;
698 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
699 return n ? *n : NULL_TREE;
700 }
701
702 static inline tree
703 lookup_field (tree var, omp_context *ctx)
704 {
705 splay_tree_node n;
706 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
707 return (tree) n->value;
708 }
709
710 static inline tree
711 lookup_sfield (tree var, omp_context *ctx)
712 {
713 splay_tree_node n;
714 n = splay_tree_lookup (ctx->sfield_map
715 ? ctx->sfield_map : ctx->field_map,
716 (splay_tree_key) var);
717 return (tree) n->value;
718 }
719
720 static inline tree
721 maybe_lookup_field (tree var, omp_context *ctx)
722 {
723 splay_tree_node n;
724 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
725 return n ? (tree) n->value : NULL_TREE;
726 }
727
728 /* Return true if DECL should be copied by pointer. SHARED_CTX is
729 the parallel context if DECL is to be shared. */
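/* In short (a summary of the cases handled below): aggregates, statics and
   externals, variables whose address is taken or that carry a
   DECL_VALUE_EXPR, and writable variables shared into a task or already
   shared by an outer parallel are passed by pointer; other scalars can use
   copy-in/copy-out.  */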
730
731 static bool
732 use_pointer_for_field (tree decl, omp_context *shared_ctx)
733 {
734 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
735 return true;
736
737 /* We can only use copy-in/copy-out semantics for shared variables
738 when we know the value is not accessible from an outer scope. */
739 if (shared_ctx)
740 {
741 /* ??? Trivially accessible from anywhere. But why would we even
742 be passing an address in this case? Should we simply assert
743 this to be false, or should we have a cleanup pass that removes
744 these from the list of mappings? */
745 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
746 return true;
747
748 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
749 without analyzing the expression whether or not its location
750 is accessible to anyone else. In the case of nested parallel
751 regions it certainly may be. */
752 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
753 return true;
754
755 /* Do not use copy-in/copy-out for variables that have their
756 address taken. */
757 if (TREE_ADDRESSABLE (decl))
758 return true;
759
760 /* Disallow copy-in/out in nested parallel if
761 decl is shared in outer parallel, otherwise
762 each thread could store the shared variable
763 in its own copy-in location, making the
764 variable no longer really shared. */
765 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
766 {
767 omp_context *up;
768
769 for (up = shared_ctx->outer; up; up = up->outer)
770 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
771 break;
772
773 if (up)
774 {
775 tree c;
776
777 for (c = gimple_omp_taskreg_clauses (up->stmt);
778 c; c = OMP_CLAUSE_CHAIN (c))
779 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
780 && OMP_CLAUSE_DECL (c) == decl)
781 break;
782
783 if (c)
784 goto maybe_mark_addressable_and_ret;
785 }
786 }
787
788 /* For tasks avoid using copy-in/out, unless they are readonly
789 (in which case just copy-in is used). As tasks can be
790 deferred or executed in a different thread, when GOMP_task
791 returns, the task hasn't necessarily terminated. */
792 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
793 {
794 tree outer;
795 maybe_mark_addressable_and_ret:
796 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
797 if (is_gimple_reg (outer))
798 {
799 /* Taking address of OUTER in lower_send_shared_vars
800 might need regimplification of everything that uses the
801 variable. */
802 if (!task_shared_vars)
803 task_shared_vars = BITMAP_ALLOC (NULL);
804 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
805 TREE_ADDRESSABLE (outer) = 1;
806 }
807 return true;
808 }
809 }
810
811 return false;
812 }
813
814 /* Create a new VAR_DECL and copy information from VAR to it. */
815
816 tree
817 copy_var_decl (tree var, tree name, tree type)
818 {
819 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
820
821 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
822 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
823 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
824 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
825 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
826 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
827 TREE_USED (copy) = 1;
828 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
829
830 return copy;
831 }
832
833 /* Construct a new automatic decl similar to VAR. */
834
835 static tree
836 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
837 {
838 tree copy = copy_var_decl (var, name, type);
839
840 DECL_CONTEXT (copy) = current_function_decl;
841 DECL_CHAIN (copy) = ctx->block_vars;
842 ctx->block_vars = copy;
843
844 return copy;
845 }
846
847 static tree
848 omp_copy_decl_1 (tree var, omp_context *ctx)
849 {
850 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
851 }
852
853 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
854 as appropriate. */
855 static tree
856 omp_build_component_ref (tree obj, tree field)
857 {
858 tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
859 if (TREE_THIS_VOLATILE (field))
860 TREE_THIS_VOLATILE (ret) |= 1;
861 if (TREE_READONLY (field))
862 TREE_READONLY (ret) |= 1;
863 return ret;
864 }
865
866 /* Build tree nodes to access the field for VAR on the receiver side. */
867
868 static tree
869 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
870 {
871 tree x, field = lookup_field (var, ctx);
872
873 /* If the receiver record type was remapped in the child function,
874 remap the field into the new record type. */
875 x = maybe_lookup_field (field, ctx);
876 if (x != NULL)
877 field = x;
878
879 x = build_simple_mem_ref (ctx->receiver_decl);
880 x = omp_build_component_ref (x, field);
881 if (by_ref)
882 x = build_simple_mem_ref (x);
883
884 return x;
885 }
886
887 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
888 of a parallel, this is a component reference; for workshare constructs
889 this is some variable. */
890
891 static tree
892 build_outer_var_ref (tree var, omp_context *ctx)
893 {
894 tree x;
895
896 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
897 x = var;
898 else if (is_variable_sized (var))
899 {
900 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
901 x = build_outer_var_ref (x, ctx);
902 x = build_simple_mem_ref (x);
903 }
904 else if (is_taskreg_ctx (ctx))
905 {
906 bool by_ref = use_pointer_for_field (var, NULL);
907 x = build_receiver_ref (var, by_ref, ctx);
908 }
909 else if (ctx->outer)
910 x = lookup_decl (var, ctx->outer);
911 else if (is_reference (var))
912 /* This can happen with orphaned constructs. If var is reference, it is
913 possible it is shared and as such valid. */
914 x = var;
915 else
916 gcc_unreachable ();
917
918 if (is_reference (var))
919 x = build_simple_mem_ref (x);
920
921 return x;
922 }
923
924 /* Build tree nodes to access the field for VAR on the sender side. */
925
926 static tree
927 build_sender_ref (tree var, omp_context *ctx)
928 {
929 tree field = lookup_sfield (var, ctx);
930 return omp_build_component_ref (ctx->sender_decl, field);
931 }
932
933 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
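/* MASK is a bit mask (as used below): the low bit requests a field in the
   receiver record (record_type / field_map), the second bit a field in the
   record used to communicate with the task firstprivate fn (srecord_type /
   sfield_map); MASK == 3 installs the variable in both.  */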
934
935 static void
936 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
937 {
938 tree field, type, sfield = NULL_TREE;
939
940 gcc_assert ((mask & 1) == 0
941 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
942 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
943 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
944
945 type = TREE_TYPE (var);
946 if (by_ref)
947 type = build_pointer_type (type);
948 else if ((mask & 3) == 1 && is_reference (var))
949 type = TREE_TYPE (type);
950
951 field = build_decl (DECL_SOURCE_LOCATION (var),
952 FIELD_DECL, DECL_NAME (var), type);
953
954 /* Remember what variable this field was created for. This does have a
955 side effect of making dwarf2out ignore this member, so for helpful
956 debugging we clear it later in delete_omp_context. */
957 DECL_ABSTRACT_ORIGIN (field) = var;
958 if (type == TREE_TYPE (var))
959 {
960 DECL_ALIGN (field) = DECL_ALIGN (var);
961 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
962 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
963 }
964 else
965 DECL_ALIGN (field) = TYPE_ALIGN (type);
966
967 if ((mask & 3) == 3)
968 {
969 insert_field_into_struct (ctx->record_type, field);
970 if (ctx->srecord_type)
971 {
972 sfield = build_decl (DECL_SOURCE_LOCATION (var),
973 FIELD_DECL, DECL_NAME (var), type);
974 DECL_ABSTRACT_ORIGIN (sfield) = var;
975 DECL_ALIGN (sfield) = DECL_ALIGN (field);
976 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
977 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
978 insert_field_into_struct (ctx->srecord_type, sfield);
979 }
980 }
981 else
982 {
983 if (ctx->srecord_type == NULL_TREE)
984 {
985 tree t;
986
987 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
988 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
989 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
990 {
991 sfield = build_decl (DECL_SOURCE_LOCATION (var),
992 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
993 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
994 insert_field_into_struct (ctx->srecord_type, sfield);
995 splay_tree_insert (ctx->sfield_map,
996 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
997 (splay_tree_value) sfield);
998 }
999 }
1000 sfield = field;
1001 insert_field_into_struct ((mask & 1) ? ctx->record_type
1002 : ctx->srecord_type, field);
1003 }
1004
1005 if (mask & 1)
1006 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
1007 (splay_tree_value) field);
1008 if ((mask & 2) && ctx->sfield_map)
1009 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1010 (splay_tree_value) sfield);
1011 }
1012
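/* Create a private copy of VAR local to CTX and record the mapping from
   VAR to it in CTX's decl map.  */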
1013 static tree
1014 install_var_local (tree var, omp_context *ctx)
1015 {
1016 tree new_var = omp_copy_decl_1 (var, ctx);
1017 insert_decl_map (&ctx->cb, var, new_var);
1018 return new_var;
1019 }
1020
1021 /* Adjust the replacement for DECL in CTX for the new context. This means
1022 copying the DECL_VALUE_EXPR, and fixing up the type. */
1023
1024 static void
1025 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1026 {
1027 tree new_decl, size;
1028
1029 new_decl = lookup_decl (decl, ctx);
1030
1031 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1032
1033 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1034 && DECL_HAS_VALUE_EXPR_P (decl))
1035 {
1036 tree ve = DECL_VALUE_EXPR (decl);
1037 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1038 SET_DECL_VALUE_EXPR (new_decl, ve);
1039 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1040 }
1041
1042 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1043 {
1044 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1045 if (size == error_mark_node)
1046 size = TYPE_SIZE (TREE_TYPE (new_decl));
1047 DECL_SIZE (new_decl) = size;
1048
1049 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1050 if (size == error_mark_node)
1051 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1052 DECL_SIZE_UNIT (new_decl) = size;
1053 }
1054 }
1055
1056 /* The callback for remap_decl. Search all containing contexts for a
1057 mapping of the variable; this avoids having to duplicate the splay
1058 tree ahead of time. We know a mapping doesn't already exist in the
1059 given context. Create new mappings to implement default semantics. */
1060
1061 static tree
1062 omp_copy_decl (tree var, copy_body_data *cb)
1063 {
1064 omp_context *ctx = (omp_context *) cb;
1065 tree new_var;
1066
1067 if (TREE_CODE (var) == LABEL_DECL)
1068 {
1069 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1070 DECL_CONTEXT (new_var) = current_function_decl;
1071 insert_decl_map (&ctx->cb, var, new_var);
1072 return new_var;
1073 }
1074
1075 while (!is_taskreg_ctx (ctx))
1076 {
1077 ctx = ctx->outer;
1078 if (ctx == NULL)
1079 return var;
1080 new_var = maybe_lookup_decl (var, ctx);
1081 if (new_var)
1082 return new_var;
1083 }
1084
1085 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1086 return var;
1087
1088 return error_mark_node;
1089 }
1090
1091
1092 /* Return the parallel region associated with STMT. */
1093
1094 /* Debugging dumps for parallel regions. */
1095 void dump_omp_region (FILE *, struct omp_region *, int);
1096 void debug_omp_region (struct omp_region *);
1097 void debug_all_omp_regions (void);
1098
1099 /* Dump the parallel region tree rooted at REGION. */
1100
1101 void
1102 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1103 {
1104 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1105 gimple_code_name[region->type]);
1106
1107 if (region->inner)
1108 dump_omp_region (file, region->inner, indent + 4);
1109
1110 if (region->cont)
1111 {
1112 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1113 region->cont->index);
1114 }
1115
1116 if (region->exit)
1117 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1118 region->exit->index);
1119 else
1120 fprintf (file, "%*s[no exit marker]\n", indent, "");
1121
1122 if (region->next)
1123 dump_omp_region (file, region->next, indent);
1124 }
1125
1126 DEBUG_FUNCTION void
1127 debug_omp_region (struct omp_region *region)
1128 {
1129 dump_omp_region (stderr, region, 0);
1130 }
1131
1132 DEBUG_FUNCTION void
1133 debug_all_omp_regions (void)
1134 {
1135 dump_omp_region (stderr, root_omp_region, 0);
1136 }
1137
1138
1139 /* Create a new parallel region starting at STMT inside region PARENT. */
1140
1141 struct omp_region *
1142 new_omp_region (basic_block bb, enum gimple_code type,
1143 struct omp_region *parent)
1144 {
1145 struct omp_region *region = XCNEW (struct omp_region);
1146
1147 region->outer = parent;
1148 region->entry = bb;
1149 region->type = type;
1150
1151 if (parent)
1152 {
1153 /* This is a nested region. Add it to the list of inner
1154 regions in PARENT. */
1155 region->next = parent->inner;
1156 parent->inner = region;
1157 }
1158 else
1159 {
1160 /* This is a toplevel region. Add it to the list of toplevel
1161 regions in ROOT_OMP_REGION. */
1162 region->next = root_omp_region;
1163 root_omp_region = region;
1164 }
1165
1166 return region;
1167 }
1168
1169 /* Release the memory associated with the region tree rooted at REGION. */
1170
1171 static void
1172 free_omp_region_1 (struct omp_region *region)
1173 {
1174 struct omp_region *i, *n;
1175
1176 for (i = region->inner; i ; i = n)
1177 {
1178 n = i->next;
1179 free_omp_region_1 (i);
1180 }
1181
1182 free (region);
1183 }
1184
1185 /* Release the memory for the entire omp region tree. */
1186
1187 void
1188 free_omp_regions (void)
1189 {
1190 struct omp_region *r, *n;
1191 for (r = root_omp_region; r ; r = n)
1192 {
1193 n = r->next;
1194 free_omp_region_1 (r);
1195 }
1196 root_omp_region = NULL;
1197 }
1198
1199
1200 /* Create a new context, with OUTER_CTX being the surrounding context. */
1201
1202 static omp_context *
1203 new_omp_context (gimple stmt, omp_context *outer_ctx)
1204 {
1205 omp_context *ctx = XCNEW (omp_context);
1206
1207 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1208 (splay_tree_value) ctx);
1209 ctx->stmt = stmt;
1210
1211 if (outer_ctx)
1212 {
1213 ctx->outer = outer_ctx;
1214 ctx->cb = outer_ctx->cb;
1215 ctx->cb.block = NULL;
1216 ctx->depth = outer_ctx->depth + 1;
1217 }
1218 else
1219 {
1220 ctx->cb.src_fn = current_function_decl;
1221 ctx->cb.dst_fn = current_function_decl;
1222 ctx->cb.src_node = cgraph_get_node (current_function_decl);
1223 gcc_checking_assert (ctx->cb.src_node);
1224 ctx->cb.dst_node = ctx->cb.src_node;
1225 ctx->cb.src_cfun = cfun;
1226 ctx->cb.copy_decl = omp_copy_decl;
1227 ctx->cb.eh_lp_nr = 0;
1228 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1229 ctx->depth = 1;
1230 }
1231
1232 ctx->cb.decl_map = pointer_map_create ();
1233
1234 return ctx;
1235 }
1236
1237 static gimple_seq maybe_catch_exception (gimple_seq);
1238
1239 /* Finalize task copyfn. */
1240
1241 static void
1242 finalize_task_copyfn (gimple task_stmt)
1243 {
1244 struct function *child_cfun;
1245 tree child_fn;
1246 gimple_seq seq = NULL, new_seq;
1247 gimple bind;
1248
1249 child_fn = gimple_omp_task_copy_fn (task_stmt);
1250 if (child_fn == NULL_TREE)
1251 return;
1252
1253 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1254
1255 /* Inform the callgraph about the new function. */
1256 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1257 = cfun->curr_properties & ~PROP_loops;
1258
1259 push_cfun (child_cfun);
1260 bind = gimplify_body (child_fn, false);
1261 gimple_seq_add_stmt (&seq, bind);
1262 new_seq = maybe_catch_exception (seq);
1263 if (new_seq != seq)
1264 {
1265 bind = gimple_build_bind (NULL, new_seq, NULL);
1266 seq = NULL;
1267 gimple_seq_add_stmt (&seq, bind);
1268 }
1269 gimple_set_body (child_fn, seq);
1270 pop_cfun ();
1271
1272 cgraph_add_new_function (child_fn, false);
1273 }
1274
1275 /* Destroy an omp_context data structure. Called through the splay tree
1276 value delete callback. */
1277
1278 static void
1279 delete_omp_context (splay_tree_value value)
1280 {
1281 omp_context *ctx = (omp_context *) value;
1282
1283 pointer_map_destroy (ctx->cb.decl_map);
1284
1285 if (ctx->field_map)
1286 splay_tree_delete (ctx->field_map);
1287 if (ctx->sfield_map)
1288 splay_tree_delete (ctx->sfield_map);
1289
1290 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it now,
1291 or it will produce corrupt debug information. */
1292 if (ctx->record_type)
1293 {
1294 tree t;
1295 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1296 DECL_ABSTRACT_ORIGIN (t) = NULL;
1297 }
1298 if (ctx->srecord_type)
1299 {
1300 tree t;
1301 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1302 DECL_ABSTRACT_ORIGIN (t) = NULL;
1303 }
1304
1305 if (is_task_ctx (ctx))
1306 finalize_task_copyfn (ctx->stmt);
1307
1308 XDELETE (ctx);
1309 }
1310
1311 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1312 context. */
1313
1314 static void
1315 fixup_child_record_type (omp_context *ctx)
1316 {
1317 tree f, type = ctx->record_type;
1318
1319 /* ??? It isn't sufficient to just call remap_type here, because
1320 variably_modified_type_p doesn't work the way we expect for
1321 record types. Testing each field for whether it needs remapping
1322 and creating a new record by hand works, however. */
1323 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1324 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1325 break;
1326 if (f)
1327 {
1328 tree name, new_fields = NULL;
1329
1330 type = lang_hooks.types.make_type (RECORD_TYPE);
1331 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1332 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1333 TYPE_DECL, name, type);
1334 TYPE_NAME (type) = name;
1335
1336 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1337 {
1338 tree new_f = copy_node (f);
1339 DECL_CONTEXT (new_f) = type;
1340 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1341 DECL_CHAIN (new_f) = new_fields;
1342 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1343 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1344 &ctx->cb, NULL);
1345 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1346 &ctx->cb, NULL);
1347 new_fields = new_f;
1348
1349 /* Arrange to be able to look up the receiver field
1350 given the sender field. */
1351 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1352 (splay_tree_value) new_f);
1353 }
1354 TYPE_FIELDS (type) = nreverse (new_fields);
1355 layout_type (type);
1356 }
1357
1358 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1359 }
1360
1361 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1362 specified by CLAUSES. */
1363
1364 static void
1365 scan_sharing_clauses (tree clauses, omp_context *ctx)
1366 {
1367 tree c, decl;
1368 bool scan_array_reductions = false;
1369
1370 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1371 {
1372 bool by_ref;
1373
1374 switch (OMP_CLAUSE_CODE (c))
1375 {
1376 case OMP_CLAUSE_PRIVATE:
1377 decl = OMP_CLAUSE_DECL (c);
1378 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1379 goto do_private;
1380 else if (!is_variable_sized (decl))
1381 install_var_local (decl, ctx);
1382 break;
1383
1384 case OMP_CLAUSE_SHARED:
1385 gcc_assert (is_taskreg_ctx (ctx));
1386 decl = OMP_CLAUSE_DECL (c);
1387 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1388 || !is_variable_sized (decl));
1389 /* Global variables don't need to be copied,
1390 the receiver side will use them directly. */
1391 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1392 break;
1393 by_ref = use_pointer_for_field (decl, ctx);
1394 if (! TREE_READONLY (decl)
1395 || TREE_ADDRESSABLE (decl)
1396 || by_ref
1397 || is_reference (decl))
1398 {
1399 install_var_field (decl, by_ref, 3, ctx);
1400 install_var_local (decl, ctx);
1401 break;
1402 }
1403 /* We don't need to copy const scalar vars back. */
1404 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1405 goto do_private;
1406
1407 case OMP_CLAUSE_LASTPRIVATE:
1408 /* Let the corresponding firstprivate clause create
1409 the variable. */
1410 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1411 break;
1412 /* FALLTHRU */
1413
1414 case OMP_CLAUSE_FIRSTPRIVATE:
1415 case OMP_CLAUSE_REDUCTION:
1416 decl = OMP_CLAUSE_DECL (c);
1417 do_private:
1418 if (is_variable_sized (decl))
1419 {
1420 if (is_task_ctx (ctx))
1421 install_var_field (decl, false, 1, ctx);
1422 break;
1423 }
1424 else if (is_taskreg_ctx (ctx))
1425 {
1426 bool global
1427 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1428 by_ref = use_pointer_for_field (decl, NULL);
1429
1430 if (is_task_ctx (ctx)
1431 && (global || by_ref || is_reference (decl)))
1432 {
1433 install_var_field (decl, false, 1, ctx);
1434 if (!global)
1435 install_var_field (decl, by_ref, 2, ctx);
1436 }
1437 else if (!global)
1438 install_var_field (decl, by_ref, 3, ctx);
1439 }
1440 install_var_local (decl, ctx);
1441 break;
1442
1443 case OMP_CLAUSE_COPYPRIVATE:
1444 case OMP_CLAUSE_COPYIN:
1445 decl = OMP_CLAUSE_DECL (c);
1446 by_ref = use_pointer_for_field (decl, NULL);
1447 install_var_field (decl, by_ref, 3, ctx);
1448 break;
1449
1450 case OMP_CLAUSE_DEFAULT:
1451 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1452 break;
1453
1454 case OMP_CLAUSE_FINAL:
1455 case OMP_CLAUSE_IF:
1456 case OMP_CLAUSE_NUM_THREADS:
1457 case OMP_CLAUSE_SCHEDULE:
1458 if (ctx->outer)
1459 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1460 break;
1461
1462 case OMP_CLAUSE_NOWAIT:
1463 case OMP_CLAUSE_ORDERED:
1464 case OMP_CLAUSE_COLLAPSE:
1465 case OMP_CLAUSE_UNTIED:
1466 case OMP_CLAUSE_MERGEABLE:
1467 break;
1468
1469 default:
1470 gcc_unreachable ();
1471 }
1472 }
1473
1474 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1475 {
1476 switch (OMP_CLAUSE_CODE (c))
1477 {
1478 case OMP_CLAUSE_LASTPRIVATE:
1479 /* Let the corresponding firstprivate clause create
1480 the variable. */
1481 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1482 scan_array_reductions = true;
1483 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1484 break;
1485 /* FALLTHRU */
1486
1487 case OMP_CLAUSE_PRIVATE:
1488 case OMP_CLAUSE_FIRSTPRIVATE:
1489 case OMP_CLAUSE_REDUCTION:
1490 decl = OMP_CLAUSE_DECL (c);
1491 if (is_variable_sized (decl))
1492 install_var_local (decl, ctx);
1493 fixup_remapped_decl (decl, ctx,
1494 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1495 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1496 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1497 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1498 scan_array_reductions = true;
1499 break;
1500
1501 case OMP_CLAUSE_SHARED:
1502 decl = OMP_CLAUSE_DECL (c);
1503 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1504 fixup_remapped_decl (decl, ctx, false);
1505 break;
1506
1507 case OMP_CLAUSE_COPYPRIVATE:
1508 case OMP_CLAUSE_COPYIN:
1509 case OMP_CLAUSE_DEFAULT:
1510 case OMP_CLAUSE_IF:
1511 case OMP_CLAUSE_NUM_THREADS:
1512 case OMP_CLAUSE_SCHEDULE:
1513 case OMP_CLAUSE_NOWAIT:
1514 case OMP_CLAUSE_ORDERED:
1515 case OMP_CLAUSE_COLLAPSE:
1516 case OMP_CLAUSE_UNTIED:
1517 case OMP_CLAUSE_FINAL:
1518 case OMP_CLAUSE_MERGEABLE:
1519 break;
1520
1521 default:
1522 gcc_unreachable ();
1523 }
1524 }
1525
1526 if (scan_array_reductions)
1527 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1528 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1529 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1530 {
1531 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1532 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1533 }
1534 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1535 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1536 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1537 }
1538
1539 /* Create a new name for omp child function. Returns an identifier. */
1540
1541 static GTY(()) unsigned int tmp_ompfn_id_num;
1542
1543 static tree
1544 create_omp_child_function_name (bool task_copy)
1545 {
1546 return (clone_function_name (current_function_decl,
1547 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1548 }
1549
1550 /* Build a decl for the omp child function. It won't contain a body
1551 yet, just the bare decl. */
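/* (Rough shape of the result, for orientation: the decl built here is
   approximately "void <parent>._omp_fn.<N> (void *.omp_data_i)" for a
   parallel or task body, or
   "void <parent>._omp_cpyfn.<N> (void *.omp_data_o, void *.omp_data_i)"
   for a task copy function; for the former, the .omp_data_i argument is
   later retyped by fixup_child_record_type to point at the per-construct
   data record.)  */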
1552
1553 static void
1554 create_omp_child_function (omp_context *ctx, bool task_copy)
1555 {
1556 tree decl, type, name, t;
1557
1558 name = create_omp_child_function_name (task_copy);
1559 if (task_copy)
1560 type = build_function_type_list (void_type_node, ptr_type_node,
1561 ptr_type_node, NULL_TREE);
1562 else
1563 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1564
1565 decl = build_decl (gimple_location (ctx->stmt),
1566 FUNCTION_DECL, name, type);
1567
1568 if (!task_copy)
1569 ctx->cb.dst_fn = decl;
1570 else
1571 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1572
1573 TREE_STATIC (decl) = 1;
1574 TREE_USED (decl) = 1;
1575 DECL_ARTIFICIAL (decl) = 1;
1576 DECL_NAMELESS (decl) = 1;
1577 DECL_IGNORED_P (decl) = 0;
1578 TREE_PUBLIC (decl) = 0;
1579 DECL_UNINLINABLE (decl) = 1;
1580 DECL_EXTERNAL (decl) = 0;
1581 DECL_CONTEXT (decl) = NULL_TREE;
1582 DECL_INITIAL (decl) = make_node (BLOCK);
1583
1584 t = build_decl (DECL_SOURCE_LOCATION (decl),
1585 RESULT_DECL, NULL_TREE, void_type_node);
1586 DECL_ARTIFICIAL (t) = 1;
1587 DECL_IGNORED_P (t) = 1;
1588 DECL_CONTEXT (t) = decl;
1589 DECL_RESULT (decl) = t;
1590
1591 t = build_decl (DECL_SOURCE_LOCATION (decl),
1592 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1593 DECL_ARTIFICIAL (t) = 1;
1594 DECL_NAMELESS (t) = 1;
1595 DECL_ARG_TYPE (t) = ptr_type_node;
1596 DECL_CONTEXT (t) = current_function_decl;
1597 TREE_USED (t) = 1;
1598 DECL_ARGUMENTS (decl) = t;
1599 if (!task_copy)
1600 ctx->receiver_decl = t;
1601 else
1602 {
1603 t = build_decl (DECL_SOURCE_LOCATION (decl),
1604 PARM_DECL, get_identifier (".omp_data_o"),
1605 ptr_type_node);
1606 DECL_ARTIFICIAL (t) = 1;
1607 DECL_NAMELESS (t) = 1;
1608 DECL_ARG_TYPE (t) = ptr_type_node;
1609 DECL_CONTEXT (t) = current_function_decl;
1610 TREE_USED (t) = 1;
1611 TREE_ADDRESSABLE (t) = 1;
1612 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1613 DECL_ARGUMENTS (decl) = t;
1614 }
1615
1616 /* Allocate memory for the function structure. The call to
1617 allocate_struct_function clobbers CFUN, so we need to restore
1618 it afterward. */
1619 push_struct_function (decl);
1620 cfun->function_end_locus = gimple_location (ctx->stmt);
1621 pop_cfun ();
1622 }
1623
1624
1625 /* Scan an OpenMP parallel directive. */
1626
1627 static void
1628 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1629 {
1630 omp_context *ctx;
1631 tree name;
1632 gimple stmt = gsi_stmt (*gsi);
1633
1634 /* Ignore parallel directives with empty bodies, unless there
1635 are copyin clauses. */
1636 if (optimize > 0
1637 && empty_body_p (gimple_omp_body (stmt))
1638 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1639 OMP_CLAUSE_COPYIN) == NULL)
1640 {
1641 gsi_replace (gsi, gimple_build_nop (), false);
1642 return;
1643 }
1644
1645 ctx = new_omp_context (stmt, outer_ctx);
1646 if (taskreg_nesting_level > 1)
1647 ctx->is_nested = true;
1648 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1649 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1650 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1651 name = create_tmp_var_name (".omp_data_s");
1652 name = build_decl (gimple_location (stmt),
1653 TYPE_DECL, name, ctx->record_type);
1654 DECL_ARTIFICIAL (name) = 1;
1655 DECL_NAMELESS (name) = 1;
1656 TYPE_NAME (ctx->record_type) = name;
1657 create_omp_child_function (ctx, false);
1658 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1659
1660 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1661 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1662
1663 if (TYPE_FIELDS (ctx->record_type) == NULL)
1664 ctx->record_type = ctx->receiver_decl = NULL;
1665 else
1666 {
1667 layout_type (ctx->record_type);
1668 fixup_child_record_type (ctx);
1669 }
1670 }
1671
1672 /* Scan an OpenMP task directive. */
1673
1674 static void
1675 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1676 {
1677 omp_context *ctx;
1678 tree name, t;
1679 gimple stmt = gsi_stmt (*gsi);
1680 location_t loc = gimple_location (stmt);
1681
1682 /* Ignore task directives with empty bodies. */
1683 if (optimize > 0
1684 && empty_body_p (gimple_omp_body (stmt)))
1685 {
1686 gsi_replace (gsi, gimple_build_nop (), false);
1687 return;
1688 }
1689
1690 ctx = new_omp_context (stmt, outer_ctx);
1691 if (taskreg_nesting_level > 1)
1692 ctx->is_nested = true;
1693 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1694 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1695 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1696 name = create_tmp_var_name (".omp_data_s");
1697 name = build_decl (gimple_location (stmt),
1698 TYPE_DECL, name, ctx->record_type);
1699 DECL_ARTIFICIAL (name) = 1;
1700 DECL_NAMELESS (name) = 1;
1701 TYPE_NAME (ctx->record_type) = name;
1702 create_omp_child_function (ctx, false);
1703 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1704
1705 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1706
1707 if (ctx->srecord_type)
1708 {
1709 name = create_tmp_var_name (".omp_data_a");
1710 name = build_decl (gimple_location (stmt),
1711 TYPE_DECL, name, ctx->srecord_type);
1712 DECL_ARTIFICIAL (name) = 1;
1713 DECL_NAMELESS (name) = 1;
1714 TYPE_NAME (ctx->srecord_type) = name;
1715 create_omp_child_function (ctx, true);
1716 }
1717
1718 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1719
1720 if (TYPE_FIELDS (ctx->record_type) == NULL)
1721 {
1722 ctx->record_type = ctx->receiver_decl = NULL;
1723 t = build_int_cst (long_integer_type_node, 0);
1724 gimple_omp_task_set_arg_size (stmt, t);
1725 t = build_int_cst (long_integer_type_node, 1);
1726 gimple_omp_task_set_arg_align (stmt, t);
1727 }
1728 else
1729 {
1730 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1731 /* Move VLA fields to the end. */
1732 p = &TYPE_FIELDS (ctx->record_type);
1733 while (*p)
1734 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1735 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1736 {
1737 *q = *p;
1738 *p = TREE_CHAIN (*p);
1739 TREE_CHAIN (*q) = NULL_TREE;
1740 q = &TREE_CHAIN (*q);
1741 }
1742 else
1743 p = &DECL_CHAIN (*p);
1744 *p = vla_fields;
1745 layout_type (ctx->record_type);
1746 fixup_child_record_type (ctx);
1747 if (ctx->srecord_type)
1748 layout_type (ctx->srecord_type);
1749 t = fold_convert_loc (loc, long_integer_type_node,
1750 TYPE_SIZE_UNIT (ctx->record_type));
1751 gimple_omp_task_set_arg_size (stmt, t);
1752 t = build_int_cst (long_integer_type_node,
1753 TYPE_ALIGN_UNIT (ctx->record_type));
1754 gimple_omp_task_set_arg_align (stmt, t);
1755 }
1756 }
1757
1758
1759 /* Scan an OpenMP loop directive. */
1760
1761 static void
1762 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1763 {
1764 omp_context *ctx;
1765 size_t i;
1766
1767 ctx = new_omp_context (stmt, outer_ctx);
1768
1769 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1770
1771 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
1772 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1773 {
1774 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1775 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1776 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1777 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1778 }
1779 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1780 }
1781
1782 /* Scan an OpenMP sections directive. */
1783
1784 static void
1785 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1786 {
1787 omp_context *ctx;
1788
1789 ctx = new_omp_context (stmt, outer_ctx);
1790 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1791 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1792 }
1793
1794 /* Scan an OpenMP single directive. */
1795
1796 static void
1797 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1798 {
1799 omp_context *ctx;
1800 tree name;
1801
1802 ctx = new_omp_context (stmt, outer_ctx);
1803 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1804 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1805 name = create_tmp_var_name (".omp_copy_s");
1806 name = build_decl (gimple_location (stmt),
1807 TYPE_DECL, name, ctx->record_type);
1808 TYPE_NAME (ctx->record_type) = name;
1809
1810 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1811 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1812
1813 if (TYPE_FIELDS (ctx->record_type) == NULL)
1814 ctx->record_type = NULL;
1815 else
1816 layout_type (ctx->record_type);
1817 }
1818
1819
1820 /* Check OpenMP nesting restrictions. */
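/* For example (illustration only), the checks below reject

       #pragma omp for
       for (i = 0; i < n; i++)
         {
           #pragma omp barrier
         }

   because a barrier may not be closely nested inside a work-sharing
   region, and likewise reject an ordered region whose closest enclosing
   loop lacks an ordered clause.  */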
1821 static bool
1822 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1823 {
1824 switch (gimple_code (stmt))
1825 {
1826 case GIMPLE_OMP_FOR:
1827 case GIMPLE_OMP_SECTIONS:
1828 case GIMPLE_OMP_SINGLE:
1829 case GIMPLE_CALL:
1830 for (; ctx != NULL; ctx = ctx->outer)
1831 switch (gimple_code (ctx->stmt))
1832 {
1833 case GIMPLE_OMP_FOR:
1834 case GIMPLE_OMP_SECTIONS:
1835 case GIMPLE_OMP_SINGLE:
1836 case GIMPLE_OMP_ORDERED:
1837 case GIMPLE_OMP_MASTER:
1838 case GIMPLE_OMP_TASK:
1839 if (is_gimple_call (stmt))
1840 {
1841 error_at (gimple_location (stmt),
1842 "barrier region may not be closely nested inside "
1843 "of work-sharing, critical, ordered, master or "
1844 "explicit task region");
1845 return false;
1846 }
1847 error_at (gimple_location (stmt),
1848 "work-sharing region may not be closely nested inside "
1849 "of work-sharing, critical, ordered, master or explicit "
1850 "task region");
1851 return false;
1852 case GIMPLE_OMP_PARALLEL:
1853 return true;
1854 default:
1855 break;
1856 }
1857 break;
1858 case GIMPLE_OMP_MASTER:
1859 for (; ctx != NULL; ctx = ctx->outer)
1860 switch (gimple_code (ctx->stmt))
1861 {
1862 case GIMPLE_OMP_FOR:
1863 case GIMPLE_OMP_SECTIONS:
1864 case GIMPLE_OMP_SINGLE:
1865 case GIMPLE_OMP_TASK:
1866 error_at (gimple_location (stmt),
1867 "master region may not be closely nested inside "
1868 "of work-sharing or explicit task region");
1869 return false;
1870 case GIMPLE_OMP_PARALLEL:
1871 return true;
1872 default:
1873 break;
1874 }
1875 break;
1876 case GIMPLE_OMP_ORDERED:
1877 for (; ctx != NULL; ctx = ctx->outer)
1878 switch (gimple_code (ctx->stmt))
1879 {
1880 case GIMPLE_OMP_CRITICAL:
1881 case GIMPLE_OMP_TASK:
1882 error_at (gimple_location (stmt),
1883 "ordered region may not be closely nested inside "
1884 "of critical or explicit task region");
1885 return false;
1886 case GIMPLE_OMP_FOR:
1887 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1888 OMP_CLAUSE_ORDERED) == NULL)
1889 {
1890 error_at (gimple_location (stmt),
1891 "ordered region must be closely nested inside "
1892 "a loop region with an ordered clause");
1893 return false;
1894 }
1895 return true;
1896 case GIMPLE_OMP_PARALLEL:
1897 return true;
1898 default:
1899 break;
1900 }
1901 break;
1902 case GIMPLE_OMP_CRITICAL:
1903 for (; ctx != NULL; ctx = ctx->outer)
1904 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1905 && (gimple_omp_critical_name (stmt)
1906 == gimple_omp_critical_name (ctx->stmt)))
1907 {
1908 error_at (gimple_location (stmt),
1909 "critical region may not be nested inside a critical "
1910 "region with the same name");
1911 return false;
1912 }
1913 break;
1914 default:
1915 break;
1916 }
1917 return true;
1918 }
1919
1920
1921 /* Helper function for scan_omp.
1922
1923 Callback for walk_tree or operators in walk_gimple_stmt used to
1924 scan for OpenMP directives in TP. */
1925
1926 static tree
1927 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1928 {
1929 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1930 omp_context *ctx = (omp_context *) wi->info;
1931 tree t = *tp;
1932
1933 switch (TREE_CODE (t))
1934 {
1935 case VAR_DECL:
1936 case PARM_DECL:
1937 case LABEL_DECL:
1938 case RESULT_DECL:
1939 if (ctx)
1940 *tp = remap_decl (t, &ctx->cb);
1941 break;
1942
1943 default:
1944 if (ctx && TYPE_P (t))
1945 *tp = remap_type (t, &ctx->cb);
1946 else if (!DECL_P (t))
1947 {
1948 *walk_subtrees = 1;
1949 if (ctx)
1950 {
1951 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1952 if (tem != TREE_TYPE (t))
1953 {
1954 if (TREE_CODE (t) == INTEGER_CST)
1955 *tp = build_int_cst_wide (tem,
1956 TREE_INT_CST_LOW (t),
1957 TREE_INT_CST_HIGH (t));
1958 else
1959 TREE_TYPE (t) = tem;
1960 }
1961 }
1962 }
1963 break;
1964 }
1965
1966 return NULL_TREE;
1967 }
1968
1969
1970 /* Helper function for scan_omp.
1971
1972 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1973 the current statement in GSI. */
1974
1975 static tree
1976 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1977 struct walk_stmt_info *wi)
1978 {
1979 gimple stmt = gsi_stmt (*gsi);
1980 omp_context *ctx = (omp_context *) wi->info;
1981
1982 if (gimple_has_location (stmt))
1983 input_location = gimple_location (stmt);
1984
1985 /* Check the OpenMP nesting restrictions. */
1986 if (ctx != NULL)
1987 {
1988 bool remove = false;
1989 if (is_gimple_omp (stmt))
1990 remove = !check_omp_nesting_restrictions (stmt, ctx);
1991 else if (is_gimple_call (stmt))
1992 {
1993 tree fndecl = gimple_call_fndecl (stmt);
1994 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1995 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1996 remove = !check_omp_nesting_restrictions (stmt, ctx);
1997 }
1998 if (remove)
1999 {
2000 stmt = gimple_build_nop ();
2001 gsi_replace (gsi, stmt, false);
2002 }
2003 }
2004
2005 *handled_ops_p = true;
2006
2007 switch (gimple_code (stmt))
2008 {
2009 case GIMPLE_OMP_PARALLEL:
2010 taskreg_nesting_level++;
2011 scan_omp_parallel (gsi, ctx);
2012 taskreg_nesting_level--;
2013 break;
2014
2015 case GIMPLE_OMP_TASK:
2016 taskreg_nesting_level++;
2017 scan_omp_task (gsi, ctx);
2018 taskreg_nesting_level--;
2019 break;
2020
2021 case GIMPLE_OMP_FOR:
2022 scan_omp_for (stmt, ctx);
2023 break;
2024
2025 case GIMPLE_OMP_SECTIONS:
2026 scan_omp_sections (stmt, ctx);
2027 break;
2028
2029 case GIMPLE_OMP_SINGLE:
2030 scan_omp_single (stmt, ctx);
2031 break;
2032
2033 case GIMPLE_OMP_SECTION:
2034 case GIMPLE_OMP_MASTER:
2035 case GIMPLE_OMP_ORDERED:
2036 case GIMPLE_OMP_CRITICAL:
2037 ctx = new_omp_context (stmt, ctx);
2038 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2039 break;
2040
2041 case GIMPLE_BIND:
2042 {
2043 tree var;
2044
2045 *handled_ops_p = false;
2046 if (ctx)
2047 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2048 insert_decl_map (&ctx->cb, var, var);
2049 }
2050 break;
2051 default:
2052 *handled_ops_p = false;
2053 break;
2054 }
2055
2056 return NULL_TREE;
2057 }
2058
2059
2060 /* Scan all the statements starting at the current statement. CTX
2061 contains context information about the OpenMP directives and
2062 clauses found during the scan. */
2063
2064 static void
2065 scan_omp (gimple_seq *body_p, omp_context *ctx)
2066 {
2067 location_t saved_location;
2068 struct walk_stmt_info wi;
2069
2070 memset (&wi, 0, sizeof (wi));
2071 wi.info = ctx;
2072 wi.want_locations = true;
2073
2074 saved_location = input_location;
2075 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2076 input_location = saved_location;
2077 }
2078 \f
2079 /* Re-gimplification and code generation routines. */
2080
2081 /* Build a call to GOMP_barrier. */
2082
2083 static tree
2084 build_omp_barrier (void)
2085 {
2086 return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
2087 }
2088
2089 /* If a context was created for STMT when it was scanned, return it. */
2090
2091 static omp_context *
2092 maybe_lookup_ctx (gimple stmt)
2093 {
2094 splay_tree_node n;
2095 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2096 return n ? (omp_context *) n->value : NULL;
2097 }
2098
2099
2100 /* Find the mapping for DECL in the nearest enclosing context of CTX
2101 that has a mapping for DECL.
2102
2103 If CTX is a nested parallel directive, we may have to use the decl
2104 mappings created in CTX's parent context. Suppose that we have the
2105 following parallel nesting (variable UIDs shown for clarity):
2106
2107 iD.1562 = 0;
2108 #omp parallel shared(iD.1562) -> outer parallel
2109 iD.1562 = iD.1562 + 1;
2110
2111 #omp parallel shared (iD.1562) -> inner parallel
2112 iD.1562 = iD.1562 - 1;
2113
2114 Each parallel structure will create a distinct .omp_data_s structure
2115 for copying iD.1562 in/out of the directive:
2116
2117 outer parallel .omp_data_s.1.i -> iD.1562
2118 inner parallel .omp_data_s.2.i -> iD.1562
2119
2120 A shared variable mapping will produce a copy-out operation before
2121 the parallel directive and a copy-in operation after it. So, in
2122 this case we would have:
2123
2124 iD.1562 = 0;
2125 .omp_data_o.1.i = iD.1562;
2126 #omp parallel shared(iD.1562) -> outer parallel
2127 .omp_data_i.1 = &.omp_data_o.1
2128 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2129
2130 .omp_data_o.2.i = iD.1562; -> **
2131 #omp parallel shared(iD.1562) -> inner parallel
2132 .omp_data_i.2 = &.omp_data_o.2
2133 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2134
2135
2136 ** This is a problem. The symbol iD.1562 cannot be referenced
2137 inside the body of the outer parallel region. But since we are
2138 emitting this copy operation while expanding the inner parallel
2139 directive, we need to access the CTX structure of the outer
2140 parallel directive to get the correct mapping:
2141
2142 .omp_data_o.2.i = .omp_data_i.1->i
2143
2144 Since there may be other workshare or parallel directives enclosing
2145 the parallel directive, it may be necessary to walk up the context
2146 parent chain. This is not a problem in general because nested
2147 parallelism happens only rarely. */
2148
2149 static tree
2150 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2151 {
2152 tree t;
2153 omp_context *up;
2154
2155 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2156 t = maybe_lookup_decl (decl, up);
2157
2158 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2159
2160 return t ? t : decl;
2161 }
2162
2163
2164 /* Similar to lookup_decl_in_outer_ctx, but do not assert that a mapping
2165 must exist; simply return DECL if it is not found in any outer context. */
2166
2167 static tree
2168 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2169 {
2170 tree t = NULL;
2171 omp_context *up;
2172
2173 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2174 t = maybe_lookup_decl (decl, up);
2175
2176 return t ? t : decl;
2177 }
2178
2179
2180 /* Construct the initialization value for reduction CLAUSE. */
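/* As a quick reference, derived from the switch below, the identity
   values are e.g.:
     +, -, |, ^, ||, !=   ->  0
     *, &&, ==            ->  1
     &                    ->  ~0  (all bits set)
     max                  ->  smallest value of TYPE (or -Inf for floats)
     min                  ->  largest value of TYPE (or +Inf for floats)  */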
2181
2182 tree
2183 omp_reduction_init (tree clause, tree type)
2184 {
2185 location_t loc = OMP_CLAUSE_LOCATION (clause);
2186 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2187 {
2188 case PLUS_EXPR:
2189 case MINUS_EXPR:
2190 case BIT_IOR_EXPR:
2191 case BIT_XOR_EXPR:
2192 case TRUTH_OR_EXPR:
2193 case TRUTH_ORIF_EXPR:
2194 case TRUTH_XOR_EXPR:
2195 case NE_EXPR:
2196 return build_zero_cst (type);
2197
2198 case MULT_EXPR:
2199 case TRUTH_AND_EXPR:
2200 case TRUTH_ANDIF_EXPR:
2201 case EQ_EXPR:
2202 return fold_convert_loc (loc, type, integer_one_node);
2203
2204 case BIT_AND_EXPR:
2205 return fold_convert_loc (loc, type, integer_minus_one_node);
2206
2207 case MAX_EXPR:
2208 if (SCALAR_FLOAT_TYPE_P (type))
2209 {
2210 REAL_VALUE_TYPE max, min;
2211 if (HONOR_INFINITIES (TYPE_MODE (type)))
2212 {
2213 real_inf (&max);
2214 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2215 }
2216 else
2217 real_maxval (&min, 1, TYPE_MODE (type));
2218 return build_real (type, min);
2219 }
2220 else
2221 {
2222 gcc_assert (INTEGRAL_TYPE_P (type));
2223 return TYPE_MIN_VALUE (type);
2224 }
2225
2226 case MIN_EXPR:
2227 if (SCALAR_FLOAT_TYPE_P (type))
2228 {
2229 REAL_VALUE_TYPE max;
2230 if (HONOR_INFINITIES (TYPE_MODE (type)))
2231 real_inf (&max);
2232 else
2233 real_maxval (&max, 0, TYPE_MODE (type));
2234 return build_real (type, max);
2235 }
2236 else
2237 {
2238 gcc_assert (INTEGRAL_TYPE_P (type));
2239 return TYPE_MAX_VALUE (type);
2240 }
2241
2242 default:
2243 gcc_unreachable ();
2244 }
2245 }
2246
2247 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2248 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2249 private variables. Initialization statements go in ILIST, while calls
2250 to destructors go in DLIST. */
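/* A rough illustrative sketch (variable and field names are made up;
   the real decls are created elsewhere in this file): for
     #pragma omp parallel firstprivate (a)
   the receiver side code appended to ILIST initializes the private
   copy from the value received from the parent, roughly
     a.1 = .omp_data_i->a;
   and any destructor call needed for a.1 is queued on DLIST. */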
2251
2252 static void
2253 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2254 omp_context *ctx)
2255 {
2256 tree c, dtor, copyin_seq, x, ptr;
2257 bool copyin_by_ref = false;
2258 bool lastprivate_firstprivate = false;
2259 int pass;
2260
2261 copyin_seq = NULL;
2262
2263 /* Do all the fixed sized types in the first pass, and the variable sized
2264 types in the second pass. This makes sure that the scalar arguments to
2265 the variable sized types are processed before we use them in the
2266 variable sized operations. */
2267 for (pass = 0; pass < 2; ++pass)
2268 {
2269 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2270 {
2271 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2272 tree var, new_var;
2273 bool by_ref;
2274 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2275
2276 switch (c_kind)
2277 {
2278 case OMP_CLAUSE_PRIVATE:
2279 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2280 continue;
2281 break;
2282 case OMP_CLAUSE_SHARED:
2283 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2284 {
2285 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2286 continue;
2287 }
2288 case OMP_CLAUSE_FIRSTPRIVATE:
2289 case OMP_CLAUSE_COPYIN:
2290 case OMP_CLAUSE_REDUCTION:
2291 break;
2292 case OMP_CLAUSE_LASTPRIVATE:
2293 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2294 {
2295 lastprivate_firstprivate = true;
2296 if (pass != 0)
2297 continue;
2298 }
2299 break;
2300 default:
2301 continue;
2302 }
2303
2304 new_var = var = OMP_CLAUSE_DECL (c);
2305 if (c_kind != OMP_CLAUSE_COPYIN)
2306 new_var = lookup_decl (var, ctx);
2307
2308 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2309 {
2310 if (pass != 0)
2311 continue;
2312 }
2313 else if (is_variable_sized (var))
2314 {
2315 /* For variable sized types, we need to allocate the
2316 actual storage here. Call alloca and store the
2317 result in the pointer decl that we created elsewhere. */
2318 if (pass == 0)
2319 continue;
2320
2321 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2322 {
2323 gimple stmt;
2324 tree tmp, atmp;
2325
2326 ptr = DECL_VALUE_EXPR (new_var);
2327 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2328 ptr = TREE_OPERAND (ptr, 0);
2329 gcc_assert (DECL_P (ptr));
2330 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2331
2332 /* void *tmp = __builtin_alloca (x); */
2333 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2334 stmt = gimple_build_call (atmp, 1, x);
2335 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2336 gimple_add_tmp_var (tmp);
2337 gimple_call_set_lhs (stmt, tmp);
2338
2339 gimple_seq_add_stmt (ilist, stmt);
2340
2341 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2342 gimplify_assign (ptr, x, ilist);
2343 }
2344 }
2345 else if (is_reference (var))
2346 {
2347 /* For references that are being privatized for Fortran,
2348 allocate new backing storage for the new pointer
2349 variable. This allows us to avoid changing all the
2350 code that expects a pointer to something that expects
2351 a direct variable. Note that this doesn't apply to
2352 C++, since reference types are disallowed in data
2353 sharing clauses there, except for NRV optimized
2354 return values. */
2355 if (pass == 0)
2356 continue;
2357
2358 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2359 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2360 {
2361 x = build_receiver_ref (var, false, ctx);
2362 x = build_fold_addr_expr_loc (clause_loc, x);
2363 }
2364 else if (TREE_CONSTANT (x))
2365 {
2366 const char *name = NULL;
2367 if (DECL_NAME (var))
2368 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2369
2370 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2371 name);
2372 gimple_add_tmp_var (x);
2373 TREE_ADDRESSABLE (x) = 1;
2374 x = build_fold_addr_expr_loc (clause_loc, x);
2375 }
2376 else
2377 {
2378 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2379 x = build_call_expr_loc (clause_loc, atmp, 1, x);
2380 }
2381
2382 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2383 gimplify_assign (new_var, x, ilist);
2384
2385 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2386 }
2387 else if (c_kind == OMP_CLAUSE_REDUCTION
2388 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2389 {
2390 if (pass == 0)
2391 continue;
2392 }
2393 else if (pass != 0)
2394 continue;
2395
2396 switch (OMP_CLAUSE_CODE (c))
2397 {
2398 case OMP_CLAUSE_SHARED:
2399 /* Shared global vars are just accessed directly. */
2400 if (is_global_var (new_var))
2401 break;
2402 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2403 needs to be delayed until after fixup_child_record_type so
2404 that we get the correct type during the dereference. */
2405 by_ref = use_pointer_for_field (var, ctx);
2406 x = build_receiver_ref (var, by_ref, ctx);
2407 SET_DECL_VALUE_EXPR (new_var, x);
2408 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2409
2410 /* ??? If VAR is not passed by reference, and the variable
2411 hasn't been initialized yet, then we'll get a warning for
2412 the store into the omp_data_s structure. Ideally, we'd be
2413 able to notice this and not store anything at all, but
2414 we're generating code too early. Suppress the warning. */
2415 if (!by_ref)
2416 TREE_NO_WARNING (var) = 1;
2417 break;
2418
2419 case OMP_CLAUSE_LASTPRIVATE:
2420 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2421 break;
2422 /* FALLTHRU */
2423
2424 case OMP_CLAUSE_PRIVATE:
2425 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2426 x = build_outer_var_ref (var, ctx);
2427 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2428 {
2429 if (is_task_ctx (ctx))
2430 x = build_receiver_ref (var, false, ctx);
2431 else
2432 x = build_outer_var_ref (var, ctx);
2433 }
2434 else
2435 x = NULL;
2436 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2437 if (x)
2438 gimplify_and_add (x, ilist);
2439 /* FALLTHRU */
2440
2441 do_dtor:
2442 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2443 if (x)
2444 {
2445 gimple_seq tseq = NULL;
2446
2447 dtor = x;
2448 gimplify_stmt (&dtor, &tseq);
2449 gimple_seq_add_seq (dlist, tseq);
2450 }
2451 break;
2452
2453 case OMP_CLAUSE_FIRSTPRIVATE:
2454 if (is_task_ctx (ctx))
2455 {
2456 if (is_reference (var) || is_variable_sized (var))
2457 goto do_dtor;
2458 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2459 ctx))
2460 || use_pointer_for_field (var, NULL))
2461 {
2462 x = build_receiver_ref (var, false, ctx);
2463 SET_DECL_VALUE_EXPR (new_var, x);
2464 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2465 goto do_dtor;
2466 }
2467 }
2468 x = build_outer_var_ref (var, ctx);
2469 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2470 gimplify_and_add (x, ilist);
2471 goto do_dtor;
2472 break;
2473
2474 case OMP_CLAUSE_COPYIN:
2475 by_ref = use_pointer_for_field (var, NULL);
2476 x = build_receiver_ref (var, by_ref, ctx);
2477 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2478 append_to_statement_list (x, &copyin_seq);
2479 copyin_by_ref |= by_ref;
2480 break;
2481
2482 case OMP_CLAUSE_REDUCTION:
2483 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2484 {
2485 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2486 x = build_outer_var_ref (var, ctx);
2487
2488 if (is_reference (var))
2489 x = build_fold_addr_expr_loc (clause_loc, x);
2490 SET_DECL_VALUE_EXPR (placeholder, x);
2491 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2492 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2493 gimple_seq_add_seq (ilist,
2494 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2495 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2496 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2497 }
2498 else
2499 {
2500 x = omp_reduction_init (c, TREE_TYPE (new_var));
2501 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2502 gimplify_assign (new_var, x, ilist);
2503 }
2504 break;
2505
2506 default:
2507 gcc_unreachable ();
2508 }
2509 }
2510 }
2511
2512 /* The copyin sequence is not to be executed by the main thread, since
2513 that would result in self-copies. That may not be visible for scalar
2514 assignments, but it certainly is for C++ operator=. */
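/* I.e. what gets emitted is conceptually
     if (omp_get_thread_num () != 0)
       <copyin_seq>;
   so only the non-master threads perform the copy. */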
2515 if (copyin_seq)
2516 {
2517 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2518 0);
2519 x = build2 (NE_EXPR, boolean_type_node, x,
2520 build_int_cst (TREE_TYPE (x), 0));
2521 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2522 gimplify_and_add (x, ilist);
2523 }
2524
2525 /* If any copyin variable is passed by reference, we must ensure the
2526 master thread doesn't modify it before it is copied over in all
2527 threads. Similarly for variables in both firstprivate and
2528 lastprivate clauses we need to ensure the lastprivate copying
2529 happens after firstprivate copying in all threads. */
2530 if (copyin_by_ref || lastprivate_firstprivate)
2531 gimplify_and_add (build_omp_barrier (), ilist);
2532 }
2533
2534
2535 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2536 both parallel and workshare constructs. PREDICATE may be NULL if it's
2537 always true. */
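/* Sketch of the code emitted when PREDICATE is non-NULL (label names
   are illustrative):

     if (PREDICATE) goto lab_true; else goto lab;
   lab_true:
     <outer var> = <private copy>;   ... one assignment per clause
   lab:
     ...  */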
2538
2539 static void
2540 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2541 omp_context *ctx)
2542 {
2543 tree x, c, label = NULL;
2544 bool par_clauses = false;
2545
2546 /* Early exit if there are no lastprivate clauses. */
2547 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2548 if (clauses == NULL)
2549 {
2550 /* If this was a workshare clause, see if it had been combined
2551 with its parallel. In that case, look for the clauses on the
2552 parallel statement itself. */
2553 if (is_parallel_ctx (ctx))
2554 return;
2555
2556 ctx = ctx->outer;
2557 if (ctx == NULL || !is_parallel_ctx (ctx))
2558 return;
2559
2560 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2561 OMP_CLAUSE_LASTPRIVATE);
2562 if (clauses == NULL)
2563 return;
2564 par_clauses = true;
2565 }
2566
2567 if (predicate)
2568 {
2569 gimple stmt;
2570 tree label_true, arm1, arm2;
2571
2572 label = create_artificial_label (UNKNOWN_LOCATION);
2573 label_true = create_artificial_label (UNKNOWN_LOCATION);
2574 arm1 = TREE_OPERAND (predicate, 0);
2575 arm2 = TREE_OPERAND (predicate, 1);
2576 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2577 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2578 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2579 label_true, label);
2580 gimple_seq_add_stmt (stmt_list, stmt);
2581 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2582 }
2583
2584 for (c = clauses; c ;)
2585 {
2586 tree var, new_var;
2587 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2588
2589 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2590 {
2591 var = OMP_CLAUSE_DECL (c);
2592 new_var = lookup_decl (var, ctx);
2593
2594 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2595 {
2596 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2597 gimple_seq_add_seq (stmt_list,
2598 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2599 }
2600 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2601
2602 x = build_outer_var_ref (var, ctx);
2603 if (is_reference (var))
2604 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2605 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2606 gimplify_and_add (x, stmt_list);
2607 }
2608 c = OMP_CLAUSE_CHAIN (c);
2609 if (c == NULL && !par_clauses)
2610 {
2611 /* If this was a workshare clause, see if it had been combined
2612 with its parallel. In that case, continue looking for the
2613 clauses also on the parallel statement itself. */
2614 if (is_parallel_ctx (ctx))
2615 break;
2616
2617 ctx = ctx->outer;
2618 if (ctx == NULL || !is_parallel_ctx (ctx))
2619 break;
2620
2621 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2622 OMP_CLAUSE_LASTPRIVATE);
2623 par_clauses = true;
2624 }
2625 }
2626
2627 if (label)
2628 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2629 }
2630
2631
2632 /* Generate code to implement the REDUCTION clauses. */
2633
2634 static void
2635 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2636 {
2637 gimple_seq sub_seq = NULL;
2638 gimple stmt;
2639 tree x, c;
2640 int count = 0;
2641
2642 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2643 update in that case, otherwise use a lock. */
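/* Illustrative shapes of the merge code built below: with a single
   scalar reduction clause the update is emitted as an atomic
     *&outer = *&outer OP private;    (an OMP_ATOMIC store)
   whereas with several clauses, or with array reductions, all the
   updates are wrapped between GOMP_atomic_start () and
   GOMP_atomic_end (). */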
2644 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2645 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2646 {
2647 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2648 {
2649 /* Never use OMP_ATOMIC for array reductions. */
2650 count = -1;
2651 break;
2652 }
2653 count++;
2654 }
2655
2656 if (count == 0)
2657 return;
2658
2659 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2660 {
2661 tree var, ref, new_var;
2662 enum tree_code code;
2663 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2664
2665 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2666 continue;
2667
2668 var = OMP_CLAUSE_DECL (c);
2669 new_var = lookup_decl (var, ctx);
2670 if (is_reference (var))
2671 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2672 ref = build_outer_var_ref (var, ctx);
2673 code = OMP_CLAUSE_REDUCTION_CODE (c);
2674
2675 /* reduction(-:var) sums up the partial results, so it acts
2676 identically to reduction(+:var). */
2677 if (code == MINUS_EXPR)
2678 code = PLUS_EXPR;
2679
2680 if (count == 1)
2681 {
2682 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2683
2684 addr = save_expr (addr);
2685 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2686 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2687 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2688 gimplify_and_add (x, stmt_seqp);
2689 return;
2690 }
2691
2692 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2693 {
2694 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2695
2696 if (is_reference (var))
2697 ref = build_fold_addr_expr_loc (clause_loc, ref);
2698 SET_DECL_VALUE_EXPR (placeholder, ref);
2699 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2700 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2701 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2702 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2703 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2704 }
2705 else
2706 {
2707 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2708 ref = build_outer_var_ref (var, ctx);
2709 gimplify_assign (ref, x, &sub_seq);
2710 }
2711 }
2712
2713 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
2714 0);
2715 gimple_seq_add_stmt (stmt_seqp, stmt);
2716
2717 gimple_seq_add_seq (stmt_seqp, sub_seq);
2718
2719 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
2720 0);
2721 gimple_seq_add_stmt (stmt_seqp, stmt);
2722 }
2723
2724
2725 /* Generate code to implement the COPYPRIVATE clauses. */
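/* Sketch: for "#pragma omp single copyprivate (v)" the thread that
   executed the single body stores v (or its address, when passed by
   reference) into the broadcast structure via SLIST, and every thread
   then copies the value back out into its own v via RLIST. */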
2726
2727 static void
2728 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2729 omp_context *ctx)
2730 {
2731 tree c;
2732
2733 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2734 {
2735 tree var, new_var, ref, x;
2736 bool by_ref;
2737 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2738
2739 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2740 continue;
2741
2742 var = OMP_CLAUSE_DECL (c);
2743 by_ref = use_pointer_for_field (var, NULL);
2744
2745 ref = build_sender_ref (var, ctx);
2746 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2747 if (by_ref)
2748 {
2749 x = build_fold_addr_expr_loc (clause_loc, new_var);
2750 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2751 }
2752 gimplify_assign (ref, x, slist);
2753
2754 ref = build_receiver_ref (var, false, ctx);
2755 if (by_ref)
2756 {
2757 ref = fold_convert_loc (clause_loc,
2758 build_pointer_type (TREE_TYPE (new_var)),
2759 ref);
2760 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2761 }
2762 if (is_reference (var))
2763 {
2764 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2765 ref = build_simple_mem_ref_loc (clause_loc, ref);
2766 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2767 }
2768 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2769 gimplify_and_add (x, rlist);
2770 }
2771 }
2772
2773
2774 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2775 and REDUCTION from the sender (aka parent) side. */
2776
2777 static void
2778 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2779 omp_context *ctx)
2780 {
2781 tree c;
2782
2783 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2784 {
2785 tree val, ref, x, var;
2786 bool by_ref, do_in = false, do_out = false;
2787 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2788
2789 switch (OMP_CLAUSE_CODE (c))
2790 {
2791 case OMP_CLAUSE_PRIVATE:
2792 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2793 break;
2794 continue;
2795 case OMP_CLAUSE_FIRSTPRIVATE:
2796 case OMP_CLAUSE_COPYIN:
2797 case OMP_CLAUSE_LASTPRIVATE:
2798 case OMP_CLAUSE_REDUCTION:
2799 break;
2800 default:
2801 continue;
2802 }
2803
2804 val = OMP_CLAUSE_DECL (c);
2805 var = lookup_decl_in_outer_ctx (val, ctx);
2806
2807 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2808 && is_global_var (var))
2809 continue;
2810 if (is_variable_sized (val))
2811 continue;
2812 by_ref = use_pointer_for_field (val, NULL);
2813
2814 switch (OMP_CLAUSE_CODE (c))
2815 {
2816 case OMP_CLAUSE_PRIVATE:
2817 case OMP_CLAUSE_FIRSTPRIVATE:
2818 case OMP_CLAUSE_COPYIN:
2819 do_in = true;
2820 break;
2821
2822 case OMP_CLAUSE_LASTPRIVATE:
2823 if (by_ref || is_reference (val))
2824 {
2825 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2826 continue;
2827 do_in = true;
2828 }
2829 else
2830 {
2831 do_out = true;
2832 if (lang_hooks.decls.omp_private_outer_ref (val))
2833 do_in = true;
2834 }
2835 break;
2836
2837 case OMP_CLAUSE_REDUCTION:
2838 do_in = true;
2839 do_out = !(by_ref || is_reference (val));
2840 break;
2841
2842 default:
2843 gcc_unreachable ();
2844 }
2845
2846 if (do_in)
2847 {
2848 ref = build_sender_ref (val, ctx);
2849 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2850 gimplify_assign (ref, x, ilist);
2851 if (is_task_ctx (ctx))
2852 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2853 }
2854
2855 if (do_out)
2856 {
2857 ref = build_sender_ref (val, ctx);
2858 gimplify_assign (var, ref, olist);
2859 }
2860 }
2861 }
2862
2863 /* Generate code to implement SHARED from the sender (aka parent)
2864 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2865 list things that got automatically shared. */
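/* Illustrative parent-side sketch for a shared variable V with field F
   in the communication record (names are made up):
     .omp_data_o.F = &V;    when V is passed by reference, or
     .omp_data_o.F = V;     by value, with V = .omp_data_o.F appended to
                            OLIST to copy the result back. */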
2866
2867 static void
2868 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2869 {
2870 tree var, ovar, nvar, f, x, record_type;
2871
2872 if (ctx->record_type == NULL)
2873 return;
2874
2875 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2876 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2877 {
2878 ovar = DECL_ABSTRACT_ORIGIN (f);
2879 nvar = maybe_lookup_decl (ovar, ctx);
2880 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2881 continue;
2882
2883 /* If CTX is a nested parallel directive, find the immediately
2884 enclosing parallel or workshare construct that contains a
2885 mapping for OVAR. */
2886 var = lookup_decl_in_outer_ctx (ovar, ctx);
2887
2888 if (use_pointer_for_field (ovar, ctx))
2889 {
2890 x = build_sender_ref (ovar, ctx);
2891 var = build_fold_addr_expr (var);
2892 gimplify_assign (x, var, ilist);
2893 }
2894 else
2895 {
2896 x = build_sender_ref (ovar, ctx);
2897 gimplify_assign (x, var, ilist);
2898
2899 if (!TREE_READONLY (var)
2900 /* We don't need to receive a new reference to a result
2901 or parm decl. In fact we must not store to it, as that would
2902 invalidate any pending RSO (return slot optimization) and
2903 generate wrong gimple during inlining. */
2904 && !((TREE_CODE (var) == RESULT_DECL
2905 || TREE_CODE (var) == PARM_DECL)
2906 && DECL_BY_REFERENCE (var)))
2907 {
2908 x = build_sender_ref (ovar, ctx);
2909 gimplify_assign (var, x, olist);
2910 }
2911 }
2912 }
2913 }
2914
2915
2916 /* A convenience function to build an empty GIMPLE_COND with just the
2917 condition. */
2918
2919 static gimple
2920 gimple_build_cond_empty (tree cond)
2921 {
2922 enum tree_code pred_code;
2923 tree lhs, rhs;
2924
2925 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2926 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2927 }
2928
2929
2930 /* Build the function calls to GOMP_parallel_start etc to actually
2931 generate the parallel operation. REGION is the parallel region
2932 being expanded. BB is the block where the code is to be inserted.
2933 WS_ARGS will be set if this is a call to a combined parallel+workshare
2934 construct; it contains the list of additional arguments needed by
2935 the workshare construct. */
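/* The sequence emitted below is roughly (WS_ARGS, if any, are appended
   to the GOMP_parallel_start arguments):

     GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
     child_fn (&.omp_data_o);
     GOMP_parallel_end ();

   where num_threads is 0 (chosen at run time), the NUM_THREADS
   expression, or a value derived from the IF clause as computed
   below. */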
2936
2937 static void
2938 expand_parallel_call (struct omp_region *region, basic_block bb,
2939 gimple entry_stmt, vec<tree, va_gc> *ws_args)
2940 {
2941 tree t, t1, t2, val, cond, c, clauses;
2942 gimple_stmt_iterator gsi;
2943 gimple stmt;
2944 enum built_in_function start_ix;
2945 int start_ix2;
2946 location_t clause_loc;
2947 vec<tree, va_gc> *args;
2948
2949 clauses = gimple_omp_parallel_clauses (entry_stmt);
2950
2951 /* Determine what flavor of GOMP_parallel_start we will be
2952 emitting. */
2953 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2954 if (is_combined_parallel (region))
2955 {
2956 switch (region->inner->type)
2957 {
2958 case GIMPLE_OMP_FOR:
2959 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2960 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2961 + (region->inner->sched_kind
2962 == OMP_CLAUSE_SCHEDULE_RUNTIME
2963 ? 3 : region->inner->sched_kind));
2964 start_ix = (enum built_in_function)start_ix2;
2965 break;
2966 case GIMPLE_OMP_SECTIONS:
2967 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2968 break;
2969 default:
2970 gcc_unreachable ();
2971 }
2972 }
2973
2974 /* By default, the value of NUM_THREADS is zero (selected at run time)
2975 and there is no conditional. */
2976 cond = NULL_TREE;
2977 val = build_int_cst (unsigned_type_node, 0);
2978
2979 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2980 if (c)
2981 cond = OMP_CLAUSE_IF_EXPR (c);
2982
2983 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2984 if (c)
2985 {
2986 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2987 clause_loc = OMP_CLAUSE_LOCATION (c);
2988 }
2989 else
2990 clause_loc = gimple_location (entry_stmt);
2991
2992 /* Ensure 'val' is of the correct type. */
2993 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2994
2995 /* If we found the clause 'if (cond)', build either
2996 (cond != 0) or (cond ? val : 1u). */
2997 if (cond)
2998 {
2999 gimple_stmt_iterator gsi;
3000
3001 cond = gimple_boolify (cond);
3002
3003 if (integer_zerop (val))
3004 val = fold_build2_loc (clause_loc,
3005 EQ_EXPR, unsigned_type_node, cond,
3006 build_int_cst (TREE_TYPE (cond), 0));
3007 else
3008 {
3009 basic_block cond_bb, then_bb, else_bb;
3010 edge e, e_then, e_else;
3011 tree tmp_then, tmp_else, tmp_join, tmp_var;
3012
3013 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
3014 if (gimple_in_ssa_p (cfun))
3015 {
3016 tmp_then = make_ssa_name (tmp_var, NULL);
3017 tmp_else = make_ssa_name (tmp_var, NULL);
3018 tmp_join = make_ssa_name (tmp_var, NULL);
3019 }
3020 else
3021 {
3022 tmp_then = tmp_var;
3023 tmp_else = tmp_var;
3024 tmp_join = tmp_var;
3025 }
3026
3027 e = split_block (bb, NULL);
3028 cond_bb = e->src;
3029 bb = e->dest;
3030 remove_edge (e);
3031
3032 then_bb = create_empty_bb (cond_bb);
3033 else_bb = create_empty_bb (then_bb);
3034 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3035 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3036
3037 stmt = gimple_build_cond_empty (cond);
3038 gsi = gsi_start_bb (cond_bb);
3039 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3040
3041 gsi = gsi_start_bb (then_bb);
3042 stmt = gimple_build_assign (tmp_then, val);
3043 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3044
3045 gsi = gsi_start_bb (else_bb);
3046 stmt = gimple_build_assign
3047 (tmp_else, build_int_cst (unsigned_type_node, 1));
3048 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3049
3050 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3051 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3052 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3053 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3054
3055 if (gimple_in_ssa_p (cfun))
3056 {
3057 gimple phi = create_phi_node (tmp_join, bb);
3058 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3059 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3060 }
3061
3062 val = tmp_join;
3063 }
3064
3065 gsi = gsi_start_bb (bb);
3066 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3067 false, GSI_CONTINUE_LINKING);
3068 }
3069
3070 gsi = gsi_last_bb (bb);
3071 t = gimple_omp_parallel_data_arg (entry_stmt);
3072 if (t == NULL)
3073 t1 = null_pointer_node;
3074 else
3075 t1 = build_fold_addr_expr (t);
3076 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3077
3078 vec_alloc (args, 3 + vec_safe_length (ws_args));
3079 args->quick_push (t2);
3080 args->quick_push (t1);
3081 args->quick_push (val);
3082 if (ws_args)
3083 args->splice (*ws_args);
3084
3085 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3086 builtin_decl_explicit (start_ix), args);
3087
3088 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3089 false, GSI_CONTINUE_LINKING);
3090
3091 t = gimple_omp_parallel_data_arg (entry_stmt);
3092 if (t == NULL)
3093 t = null_pointer_node;
3094 else
3095 t = build_fold_addr_expr (t);
3096 t = build_call_expr_loc (gimple_location (entry_stmt),
3097 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3098 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3099 false, GSI_CONTINUE_LINKING);
3100
3101 t = build_call_expr_loc (gimple_location (entry_stmt),
3102 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3103 0);
3104 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3105 false, GSI_CONTINUE_LINKING);
3106 }
3107
3108
3109 /* Build the function call to GOMP_task to actually
3110 generate the task operation. BB is the block where the code is to be inserted. */
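/* The call built below has the shape

     GOMP_task (child_fn, &.omp_data_o, cpyfn, arg_size, arg_align,
                if_cond, flags);

   where FLAGS encodes the UNTIED (bit 0), FINAL (bit 1) and MERGEABLE
   (bit 2) clauses. */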
3111
3112 static void
3113 expand_task_call (basic_block bb, gimple entry_stmt)
3114 {
3115 tree t, t1, t2, t3, flags, cond, c, c2, clauses;
3116 gimple_stmt_iterator gsi;
3117 location_t loc = gimple_location (entry_stmt);
3118
3119 clauses = gimple_omp_task_clauses (entry_stmt);
3120
3121 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3122 if (c)
3123 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3124 else
3125 cond = boolean_true_node;
3126
3127 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3128 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3129 flags = build_int_cst (unsigned_type_node,
3130 (c ? 1 : 0) + (c2 ? 4 : 0));
3131
3132 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3133 if (c)
3134 {
3135 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3136 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3137 build_int_cst (unsigned_type_node, 2),
3138 build_int_cst (unsigned_type_node, 0));
3139 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3140 }
3141
3142 gsi = gsi_last_bb (bb);
3143 t = gimple_omp_task_data_arg (entry_stmt);
3144 if (t == NULL)
3145 t2 = null_pointer_node;
3146 else
3147 t2 = build_fold_addr_expr_loc (loc, t);
3148 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3149 t = gimple_omp_task_copy_fn (entry_stmt);
3150 if (t == NULL)
3151 t3 = null_pointer_node;
3152 else
3153 t3 = build_fold_addr_expr_loc (loc, t);
3154
3155 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3156 7, t1, t2, t3,
3157 gimple_omp_task_arg_size (entry_stmt),
3158 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3159
3160 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3161 false, GSI_CONTINUE_LINKING);
3162 }
3163
3164
3165 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3166 catch handler and return it. This prevents programs from violating the
3167 structured block semantics with throws. */
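/* Conceptually the result is

     try { BODY } catch (...) { <terminate or __builtin_trap> }

   using the language's eh_protect_cleanup_actions hook when it
   provides one. */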
3168
3169 static gimple_seq
3170 maybe_catch_exception (gimple_seq body)
3171 {
3172 gimple g;
3173 tree decl;
3174
3175 if (!flag_exceptions)
3176 return body;
3177
3178 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3179 decl = lang_hooks.eh_protect_cleanup_actions ();
3180 else
3181 decl = builtin_decl_explicit (BUILT_IN_TRAP);
3182
3183 g = gimple_build_eh_must_not_throw (decl);
3184 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3185 GIMPLE_TRY_CATCH);
3186
3187 return gimple_seq_alloc_with_stmt (g);
3188 }
3189
3190 /* Chain all the DECLs in the vector V by their DECL_CHAIN fields. */
3191
3192 static tree
3193 vec2chain (vec<tree, va_gc> *v)
3194 {
3195 tree chain = NULL_TREE, t;
3196 unsigned ix;
3197
3198 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
3199 {
3200 DECL_CHAIN (t) = chain;
3201 chain = t;
3202 }
3203
3204 return chain;
3205 }
3206
3207
3208 /* Remove barriers in REGION->EXIT's block. Note that this is only
3209 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3210 is an implicit barrier, any barrier that a workshare inside the
3211 GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL region
3212 can now be removed. */
3213
3214 static void
3215 remove_exit_barrier (struct omp_region *region)
3216 {
3217 gimple_stmt_iterator gsi;
3218 basic_block exit_bb;
3219 edge_iterator ei;
3220 edge e;
3221 gimple stmt;
3222 int any_addressable_vars = -1;
3223
3224 exit_bb = region->exit;
3225
3226 /* If the parallel region doesn't return, we don't have REGION->EXIT
3227 block at all. */
3228 if (! exit_bb)
3229 return;
3230
3231 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3232 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3233 statements that can appear in between are extremely limited -- no
3234 memory operations at all. Here, we allow nothing at all, so the
3235 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3236 gsi = gsi_last_bb (exit_bb);
3237 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3238 gsi_prev (&gsi);
3239 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3240 return;
3241
3242 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3243 {
3244 gsi = gsi_last_bb (e->src);
3245 if (gsi_end_p (gsi))
3246 continue;
3247 stmt = gsi_stmt (gsi);
3248 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3249 && !gimple_omp_return_nowait_p (stmt))
3250 {
3251 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3252 in many cases. If there could be tasks queued, the barrier
3253 might be needed to let the tasks run before some local
3254 variable of the parallel that the task uses as shared
3255 runs out of scope. The task can be spawned either
3256 from within the current function (this would be easy to check)
3257 or from some function it calls and gets passed an address
3258 of such a variable. */
3259 if (any_addressable_vars < 0)
3260 {
3261 gimple parallel_stmt = last_stmt (region->entry);
3262 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3263 tree local_decls, block, decl;
3264 unsigned ix;
3265
3266 any_addressable_vars = 0;
3267 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3268 if (TREE_ADDRESSABLE (decl))
3269 {
3270 any_addressable_vars = 1;
3271 break;
3272 }
3273 for (block = gimple_block (stmt);
3274 !any_addressable_vars
3275 && block
3276 && TREE_CODE (block) == BLOCK;
3277 block = BLOCK_SUPERCONTEXT (block))
3278 {
3279 for (local_decls = BLOCK_VARS (block);
3280 local_decls;
3281 local_decls = DECL_CHAIN (local_decls))
3282 if (TREE_ADDRESSABLE (local_decls))
3283 {
3284 any_addressable_vars = 1;
3285 break;
3286 }
3287 if (block == gimple_block (parallel_stmt))
3288 break;
3289 }
3290 }
3291 if (!any_addressable_vars)
3292 gimple_omp_return_set_nowait (stmt);
3293 }
3294 }
3295 }
3296
3297 static void
3298 remove_exit_barriers (struct omp_region *region)
3299 {
3300 if (region->type == GIMPLE_OMP_PARALLEL)
3301 remove_exit_barrier (region);
3302
3303 if (region->inner)
3304 {
3305 region = region->inner;
3306 remove_exit_barriers (region);
3307 while (region->next)
3308 {
3309 region = region->next;
3310 remove_exit_barriers (region);
3311 }
3312 }
3313 }
3314
3315 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3316 calls. These can't be declared as const functions, but
3317 within one parallel body they are constant, so they can be
3318 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3319 which are declared const. Similarly for a task body, except
3320 that in an untied task omp_get_thread_num () can change at any task
3321 scheduling point. */
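/* E.g. a call to omp_get_num_threads () found in the outlined body is
   redirected to __builtin_omp_get_num_threads (), which is declared
   const and can therefore be CSEd within that body. */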
3322
3323 static void
3324 optimize_omp_library_calls (gimple entry_stmt)
3325 {
3326 basic_block bb;
3327 gimple_stmt_iterator gsi;
3328 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3329 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3330 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3331 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
3332 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3333 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3334 OMP_CLAUSE_UNTIED) != NULL);
3335
3336 FOR_EACH_BB (bb)
3337 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3338 {
3339 gimple call = gsi_stmt (gsi);
3340 tree decl;
3341
3342 if (is_gimple_call (call)
3343 && (decl = gimple_call_fndecl (call))
3344 && DECL_EXTERNAL (decl)
3345 && TREE_PUBLIC (decl)
3346 && DECL_INITIAL (decl) == NULL)
3347 {
3348 tree built_in;
3349
3350 if (DECL_NAME (decl) == thr_num_id)
3351 {
3352 /* In #pragma omp task untied omp_get_thread_num () can change
3353 during the execution of the task region. */
3354 if (untied_task)
3355 continue;
3356 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3357 }
3358 else if (DECL_NAME (decl) == num_thr_id)
3359 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3360 else
3361 continue;
3362
3363 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3364 || gimple_call_num_args (call) != 0)
3365 continue;
3366
3367 if (flag_exceptions && !TREE_NOTHROW (decl))
3368 continue;
3369
3370 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3371 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3372 TREE_TYPE (TREE_TYPE (built_in))))
3373 continue;
3374
3375 gimple_call_set_fndecl (call, built_in);
3376 }
3377 }
3378 }
3379
3380 /* Expand the OpenMP parallel or task directive starting at REGION. */
3381
3382 static void
3383 expand_omp_taskreg (struct omp_region *region)
3384 {
3385 basic_block entry_bb, exit_bb, new_bb;
3386 struct function *child_cfun;
3387 tree child_fn, block, t;
3388 gimple_stmt_iterator gsi;
3389 gimple entry_stmt, stmt;
3390 edge e;
3391 vec<tree, va_gc> *ws_args;
3392
3393 entry_stmt = last_stmt (region->entry);
3394 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3395 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3396
3397 entry_bb = region->entry;
3398 exit_bb = region->exit;
3399
3400 if (is_combined_parallel (region))
3401 ws_args = region->ws_args;
3402 else
3403 ws_args = NULL;
3404
3405 if (child_cfun->cfg)
3406 {
3407 /* Due to inlining, it may happen that we have already outlined
3408 the region, in which case all we need to do is make the
3409 sub-graph unreachable and emit the parallel call. */
3410 edge entry_succ_e, exit_succ_e;
3411 gimple_stmt_iterator gsi;
3412
3413 entry_succ_e = single_succ_edge (entry_bb);
3414
3415 gsi = gsi_last_bb (entry_bb);
3416 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3417 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3418 gsi_remove (&gsi, true);
3419
3420 new_bb = entry_bb;
3421 if (exit_bb)
3422 {
3423 exit_succ_e = single_succ_edge (exit_bb);
3424 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3425 }
3426 remove_edge_and_dominated_blocks (entry_succ_e);
3427 }
3428 else
3429 {
3430 unsigned srcidx, dstidx, num;
3431
3432 /* If the parallel region needs data sent from the parent
3433 function, then the very first statement (except for possible
3434 tree profile counter updates) of the parallel body
3435 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3436 &.OMP_DATA_O is passed as an argument to the child function,
3437 we need to replace it with the argument as seen by the child
3438 function.
3439
3440 In most cases, this will end up being the identity assignment
3441 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3442 a function call that has been inlined, the original PARM_DECL
3443 .OMP_DATA_I may have been converted into a different local
3444 variable, in which case we need to keep the assignment. */
3445 if (gimple_omp_taskreg_data_arg (entry_stmt))
3446 {
3447 basic_block entry_succ_bb = single_succ (entry_bb);
3448 gimple_stmt_iterator gsi;
3449 tree arg, narg;
3450 gimple parcopy_stmt = NULL;
3451
3452 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3453 {
3454 gimple stmt;
3455
3456 gcc_assert (!gsi_end_p (gsi));
3457 stmt = gsi_stmt (gsi);
3458 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3459 continue;
3460
3461 if (gimple_num_ops (stmt) == 2)
3462 {
3463 tree arg = gimple_assign_rhs1 (stmt);
3464
3465 /* We ignore the subcode because we're
3466 effectively doing a STRIP_NOPS. */
3467
3468 if (TREE_CODE (arg) == ADDR_EXPR
3469 && TREE_OPERAND (arg, 0)
3470 == gimple_omp_taskreg_data_arg (entry_stmt))
3471 {
3472 parcopy_stmt = stmt;
3473 break;
3474 }
3475 }
3476 }
3477
3478 gcc_assert (parcopy_stmt != NULL);
3479 arg = DECL_ARGUMENTS (child_fn);
3480
3481 if (!gimple_in_ssa_p (cfun))
3482 {
3483 if (gimple_assign_lhs (parcopy_stmt) == arg)
3484 gsi_remove (&gsi, true);
3485 else
3486 {
3487 /* ?? Is setting the subcode really necessary ?? */
3488 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3489 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3490 }
3491 }
3492 else
3493 {
3494 /* If we are in ssa form, we must load the value from the default
3495 definition of the argument. That should not be defined now,
3496 since the argument is not used uninitialized. */
3497 gcc_assert (ssa_default_def (cfun, arg) == NULL);
3498 narg = make_ssa_name (arg, gimple_build_nop ());
3499 set_ssa_default_def (cfun, arg, narg);
3500 /* ?? Is setting the subcode really necessary ?? */
3501 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3502 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3503 update_stmt (parcopy_stmt);
3504 }
3505 }
3506
3507 /* Declare local variables needed in CHILD_CFUN. */
3508 block = DECL_INITIAL (child_fn);
3509 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3510 /* The gimplifier could record temporaries in the parallel/task block
3511 rather than in the containing function's local_decls chain,
3512 which would mean cgraph missed finalizing them. Do it now. */
3513 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3514 if (TREE_CODE (t) == VAR_DECL
3515 && TREE_STATIC (t)
3516 && !DECL_EXTERNAL (t))
3517 varpool_finalize_decl (t);
3518 DECL_SAVED_TREE (child_fn) = NULL;
3519 /* We'll create a CFG for child_fn, so no gimple body is needed. */
3520 gimple_set_body (child_fn, NULL);
3521 TREE_USED (block) = 1;
3522
3523 /* Reset DECL_CONTEXT on function arguments. */
3524 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3525 DECL_CONTEXT (t) = child_fn;
3526
3527 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3528 so that it can be moved to the child function. */
3529 gsi = gsi_last_bb (entry_bb);
3530 stmt = gsi_stmt (gsi);
3531 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3532 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3533 gsi_remove (&gsi, true);
3534 e = split_block (entry_bb, stmt);
3535 entry_bb = e->dest;
3536 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3537
3538 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3539 if (exit_bb)
3540 {
3541 gsi = gsi_last_bb (exit_bb);
3542 gcc_assert (!gsi_end_p (gsi)
3543 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3544 stmt = gimple_build_return (NULL);
3545 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3546 gsi_remove (&gsi, true);
3547 }
3548
3549 /* Move the parallel region into CHILD_CFUN. */
3550
3551 if (gimple_in_ssa_p (cfun))
3552 {
3553 init_tree_ssa (child_cfun);
3554 init_ssa_operands (child_cfun);
3555 child_cfun->gimple_df->in_ssa_p = true;
3556 block = NULL_TREE;
3557 }
3558 else
3559 block = gimple_block (entry_stmt);
3560
3561 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3562 if (exit_bb)
3563 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3564
3565 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3566 num = vec_safe_length (child_cfun->local_decls);
3567 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3568 {
3569 t = (*child_cfun->local_decls)[srcidx];
3570 if (DECL_CONTEXT (t) == cfun->decl)
3571 continue;
3572 if (srcidx != dstidx)
3573 (*child_cfun->local_decls)[dstidx] = t;
3574 dstidx++;
3575 }
3576 if (dstidx != num)
3577 vec_safe_truncate (child_cfun->local_decls, dstidx);
3578
3579 /* Inform the callgraph about the new function. */
3580 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3581 = cfun->curr_properties & ~PROP_loops;
3582 cgraph_add_new_function (child_fn, true);
3583
3584 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3585 fixed in a following pass. */
3586 push_cfun (child_cfun);
3587 if (optimize)
3588 optimize_omp_library_calls (entry_stmt);
3589 rebuild_cgraph_edges ();
3590
3591 /* Some EH regions might become dead, see PR34608. If
3592 pass_cleanup_cfg isn't the first pass to happen with the
3593 new child, these dead EH edges might cause problems.
3594 Clean them up now. */
3595 if (flag_exceptions)
3596 {
3597 basic_block bb;
3598 bool changed = false;
3599
3600 FOR_EACH_BB (bb)
3601 changed |= gimple_purge_dead_eh_edges (bb);
3602 if (changed)
3603 cleanup_tree_cfg ();
3604 }
3605 if (gimple_in_ssa_p (cfun))
3606 update_ssa (TODO_update_ssa);
3607 pop_cfun ();
3608 }
3609
3610 /* Emit a library call to launch the children threads. */
3611 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3612 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3613 else
3614 expand_task_call (new_bb, entry_stmt);
3615 if (gimple_in_ssa_p (cfun))
3616 update_ssa (TODO_update_ssa_only_virtuals);
3617 }
3618
3619
3620 /* A subroutine of expand_omp_for. Generate code for a parallel
3621 loop with any schedule. Given parameters:
3622
3623 for (V = N1; V cond N2; V += STEP) BODY;
3624
3625 where COND is "<" or ">", we generate pseudocode
3626
3627 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3628 if (more) goto L0; else goto L3;
3629 L0:
3630 V = istart0;
3631 iend = iend0;
3632 L1:
3633 BODY;
3634 V += STEP;
3635 if (V cond iend) goto L1; else goto L2;
3636 L2:
3637 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3638 L3:
3639
3640 If this is a combined omp parallel loop, instead of the call to
3641 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3642
3643 For collapsed loops, given parameters:
3644 collapse(3)
3645 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3646 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3647 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3648 BODY;
3649
3650 we generate pseudocode
3651
3652 if (cond3 is <)
3653 adj = STEP3 - 1;
3654 else
3655 adj = STEP3 + 1;
3656 count3 = (adj + N32 - N31) / STEP3;
3657 if (cond2 is <)
3658 adj = STEP2 - 1;
3659 else
3660 adj = STEP2 + 1;
3661 count2 = (adj + N22 - N21) / STEP2;
3662 if (cond1 is <)
3663 adj = STEP1 - 1;
3664 else
3665 adj = STEP1 + 1;
3666 count1 = (adj + N12 - N11) / STEP1;
3667 count = count1 * count2 * count3;
3668 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3669 if (more) goto L0; else goto L3;
3670 L0:
3671 V = istart0;
3672 T = V;
3673 V3 = N31 + (T % count3) * STEP3;
3674 T = T / count3;
3675 V2 = N21 + (T % count2) * STEP2;
3676 T = T / count2;
3677 V1 = N11 + T * STEP1;
3678 iend = iend0;
3679 L1:
3680 BODY;
3681 V += 1;
3682 if (V < iend) goto L10; else goto L2;
3683 L10:
3684 V3 += STEP3;
3685 if (V3 cond3 N32) goto L1; else goto L11;
3686 L11:
3687 V3 = N31;
3688 V2 += STEP2;
3689 if (V2 cond2 N22) goto L1; else goto L12;
3690 L12:
3691 V2 = N21;
3692 V1 += STEP1;
3693 goto L1;
3694 L2:
3695 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3696 L3:
3697
3698 */
3699
3700 static void
3701 expand_omp_for_generic (struct omp_region *region,
3702 struct omp_for_data *fd,
3703 enum built_in_function start_fn,
3704 enum built_in_function next_fn)
3705 {
3706 tree type, istart0, iend0, iend;
3707 tree t, vmain, vback, bias = NULL_TREE;
3708 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3709 basic_block l2_bb = NULL, l3_bb = NULL;
3710 gimple_stmt_iterator gsi;
3711 gimple stmt;
3712 bool in_combined_parallel = is_combined_parallel (region);
3713 bool broken_loop = region->cont == NULL;
3714 edge e, ne;
3715 tree *counts = NULL;
3716 int i;
3717
3718 gcc_assert (!broken_loop || !in_combined_parallel);
3719 gcc_assert (fd->iter_type == long_integer_type_node
3720 || !in_combined_parallel);
3721
3722 type = TREE_TYPE (fd->loop.v);
3723 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3724 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3725 TREE_ADDRESSABLE (istart0) = 1;
3726 TREE_ADDRESSABLE (iend0) = 1;
3727
3728 /* See if we need to bias by LLONG_MIN. */
3729 if (fd->iter_type == long_long_unsigned_type_node
3730 && TREE_CODE (type) == INTEGER_TYPE
3731 && !TYPE_UNSIGNED (type))
3732 {
3733 tree n1, n2;
3734
3735 if (fd->loop.cond_code == LT_EXPR)
3736 {
3737 n1 = fd->loop.n1;
3738 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3739 }
3740 else
3741 {
3742 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3743 n2 = fd->loop.n1;
3744 }
3745 if (TREE_CODE (n1) != INTEGER_CST
3746 || TREE_CODE (n2) != INTEGER_CST
3747 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3748 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3749 }
3750
3751 entry_bb = region->entry;
3752 cont_bb = region->cont;
3753 collapse_bb = NULL;
3754 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3755 gcc_assert (broken_loop
3756 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3757 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3758 l1_bb = single_succ (l0_bb);
3759 if (!broken_loop)
3760 {
3761 l2_bb = create_empty_bb (cont_bb);
3762 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3763 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3764 }
3765 else
3766 l2_bb = NULL;
3767 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3768 exit_bb = region->exit;
3769
3770 gsi = gsi_last_bb (entry_bb);
3771
3772 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3773 if (fd->collapse > 1)
3774 {
3775 /* Collapsed loops need work for expansion in SSA form. */
3776 gcc_assert (!gimple_in_ssa_p (cfun));
3777 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3778 for (i = 0; i < fd->collapse; i++)
3779 {
3780 tree itype = TREE_TYPE (fd->loops[i].v);
3781
3782 if (POINTER_TYPE_P (itype))
3783 itype = signed_type_for (itype);
3784 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3785 ? -1 : 1));
3786 t = fold_build2 (PLUS_EXPR, itype,
3787 fold_convert (itype, fd->loops[i].step), t);
3788 t = fold_build2 (PLUS_EXPR, itype, t,
3789 fold_convert (itype, fd->loops[i].n2));
3790 t = fold_build2 (MINUS_EXPR, itype, t,
3791 fold_convert (itype, fd->loops[i].n1));
3792 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3793 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3794 fold_build1 (NEGATE_EXPR, itype, t),
3795 fold_build1 (NEGATE_EXPR, itype,
3796 fold_convert (itype,
3797 fd->loops[i].step)));
3798 else
3799 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3800 fold_convert (itype, fd->loops[i].step));
3801 t = fold_convert (type, t);
3802 if (TREE_CODE (t) == INTEGER_CST)
3803 counts[i] = t;
3804 else
3805 {
3806 counts[i] = create_tmp_reg (type, ".count");
3807 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3808 true, GSI_SAME_STMT);
3809 stmt = gimple_build_assign (counts[i], t);
3810 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3811 }
3812 if (SSA_VAR_P (fd->loop.n2))
3813 {
3814 if (i == 0)
3815 t = counts[0];
3816 else
3817 {
3818 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3819 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3820 true, GSI_SAME_STMT);
3821 }
3822 stmt = gimple_build_assign (fd->loop.n2, t);
3823 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3824 }
3825 }
3826 }
3827 if (in_combined_parallel)
3828 {
3829 /* In a combined parallel loop, emit a call to
3830 GOMP_loop_foo_next. */
3831 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
3832 build_fold_addr_expr (istart0),
3833 build_fold_addr_expr (iend0));
3834 }
3835 else
3836 {
3837 tree t0, t1, t2, t3, t4;
3838 /* If this is not a combined parallel loop, emit a call to
3839 GOMP_loop_foo_start in ENTRY_BB. */
3840 t4 = build_fold_addr_expr (iend0);
3841 t3 = build_fold_addr_expr (istart0);
3842 t2 = fold_convert (fd->iter_type, fd->loop.step);
3843 if (POINTER_TYPE_P (type)
3844 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3845 {
3846 /* Avoid casting pointers to an integer of a different size. */
3847 tree itype = signed_type_for (type);
3848 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3849 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3850 }
3851 else
3852 {
3853 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3854 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3855 }
3856 if (bias)
3857 {
3858 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3859 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3860 }
3861 if (fd->iter_type == long_integer_type_node)
3862 {
3863 if (fd->chunk_size)
3864 {
3865 t = fold_convert (fd->iter_type, fd->chunk_size);
3866 t = build_call_expr (builtin_decl_explicit (start_fn),
3867 6, t0, t1, t2, t, t3, t4);
3868 }
3869 else
3870 t = build_call_expr (builtin_decl_explicit (start_fn),
3871 5, t0, t1, t2, t3, t4);
3872 }
3873 else
3874 {
3875 tree t5;
3876 tree c_bool_type;
3877 tree bfn_decl;
3878
3879 /* The GOMP_loop_ull_*start functions have an additional boolean
3880 argument, true for < loops and false for > loops.
3881 In Fortran, the C bool type can be different from
3882 boolean_type_node. */
3883 bfn_decl = builtin_decl_explicit (start_fn);
3884 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
3885 t5 = build_int_cst (c_bool_type,
3886 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3887 if (fd->chunk_size)
3888 {
3889 tree bfn_decl = builtin_decl_explicit (start_fn);
3890 t = fold_convert (fd->iter_type, fd->chunk_size);
3891 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
3892 }
3893 else
3894 t = build_call_expr (builtin_decl_explicit (start_fn),
3895 6, t5, t0, t1, t2, t3, t4);
3896 }
3897 }
3898 if (TREE_TYPE (t) != boolean_type_node)
3899 t = fold_build2 (NE_EXPR, boolean_type_node,
3900 t, build_int_cst (TREE_TYPE (t), 0));
3901 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3902 true, GSI_SAME_STMT);
3903 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3904
3905 /* Remove the GIMPLE_OMP_FOR statement. */
3906 gsi_remove (&gsi, true);
3907
3908 /* Iteration setup for sequential loop goes in L0_BB. */
3909 gsi = gsi_start_bb (l0_bb);
3910 t = istart0;
3911 if (bias)
3912 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3913 if (POINTER_TYPE_P (type))
3914 t = fold_convert (signed_type_for (type), t);
3915 t = fold_convert (type, t);
3916 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3917 false, GSI_CONTINUE_LINKING);
3918 stmt = gimple_build_assign (fd->loop.v, t);
3919 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3920
3921 t = iend0;
3922 if (bias)
3923 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3924 if (POINTER_TYPE_P (type))
3925 t = fold_convert (signed_type_for (type), t);
3926 t = fold_convert (type, t);
3927 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3928 false, GSI_CONTINUE_LINKING);
3929 if (fd->collapse > 1)
3930 {
3931 tree tem = create_tmp_reg (type, ".tem");
3932 stmt = gimple_build_assign (tem, fd->loop.v);
3933 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3934 for (i = fd->collapse - 1; i >= 0; i--)
3935 {
3936 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3937 itype = vtype;
3938 if (POINTER_TYPE_P (vtype))
3939 itype = signed_type_for (vtype);
3940 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3941 t = fold_convert (itype, t);
3942 t = fold_build2 (MULT_EXPR, itype, t,
3943 fold_convert (itype, fd->loops[i].step));
3944 if (POINTER_TYPE_P (vtype))
3945 t = fold_build_pointer_plus (fd->loops[i].n1, t);
3946 else
3947 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3948 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3949 false, GSI_CONTINUE_LINKING);
3950 stmt = gimple_build_assign (fd->loops[i].v, t);
3951 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3952 if (i != 0)
3953 {
3954 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3955 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3956 false, GSI_CONTINUE_LINKING);
3957 stmt = gimple_build_assign (tem, t);
3958 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3959 }
3960 }
3961 }
3962
3963 if (!broken_loop)
3964 {
3965 /* Code to control the increment and predicate for the sequential
3966 loop goes in the CONT_BB. */
3967 gsi = gsi_last_bb (cont_bb);
3968 stmt = gsi_stmt (gsi);
3969 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3970 vmain = gimple_omp_continue_control_use (stmt);
3971 vback = gimple_omp_continue_control_def (stmt);
3972
3973 if (POINTER_TYPE_P (type))
3974 t = fold_build_pointer_plus (vmain, fd->loop.step);
3975 else
3976 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3977 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3978 true, GSI_SAME_STMT);
3979 stmt = gimple_build_assign (vback, t);
3980 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3981
3982 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3983 stmt = gimple_build_cond_empty (t);
3984 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3985
3986 /* Remove GIMPLE_OMP_CONTINUE. */
3987 gsi_remove (&gsi, true);
3988
3989 if (fd->collapse > 1)
3990 {
3991 basic_block last_bb, bb;
3992
3993 last_bb = cont_bb;
3994 for (i = fd->collapse - 1; i >= 0; i--)
3995 {
3996 tree vtype = TREE_TYPE (fd->loops[i].v);
3997
3998 bb = create_empty_bb (last_bb);
3999 gsi = gsi_start_bb (bb);
4000
4001 if (i < fd->collapse - 1)
4002 {
4003 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
4004 e->probability = REG_BR_PROB_BASE / 8;
4005
4006 t = fd->loops[i + 1].n1;
4007 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4008 false, GSI_CONTINUE_LINKING);
4009 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4010 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4011 }
4012 else
4013 collapse_bb = bb;
4014
4015 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4016
4017 if (POINTER_TYPE_P (vtype))
4018 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
4019 else
4020 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
4021 fd->loops[i].step);
4022 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4023 false, GSI_CONTINUE_LINKING);
4024 stmt = gimple_build_assign (fd->loops[i].v, t);
4025 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4026
4027 if (i > 0)
4028 {
4029 t = fd->loops[i].n2;
4030 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4031 false, GSI_CONTINUE_LINKING);
4032 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4033 fd->loops[i].v, t);
4034 stmt = gimple_build_cond_empty (t);
4035 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4036 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4037 e->probability = REG_BR_PROB_BASE * 7 / 8;
4038 }
4039 else
4040 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4041 last_bb = bb;
4042 }
4043 }
4044
4045 /* Emit code to get the next parallel iteration in L2_BB. */
4046 gsi = gsi_start_bb (l2_bb);
4047
4048 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4049 build_fold_addr_expr (istart0),
4050 build_fold_addr_expr (iend0));
4051 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4052 false, GSI_CONTINUE_LINKING);
4053 if (TREE_TYPE (t) != boolean_type_node)
4054 t = fold_build2 (NE_EXPR, boolean_type_node,
4055 t, build_int_cst (TREE_TYPE (t), 0));
4056 stmt = gimple_build_cond_empty (t);
4057 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4058 }
4059
4060 /* Add the loop cleanup function. */
4061 gsi = gsi_last_bb (exit_bb);
4062 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4063 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
4064 else
4065 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
4066 stmt = gimple_build_call (t, 0);
4067 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4068 gsi_remove (&gsi, true);
4069
4070 /* Connect the new blocks. */
4071 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4072 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4073
4074 if (!broken_loop)
4075 {
4076 gimple_seq phis;
4077
4078 e = find_edge (cont_bb, l3_bb);
4079 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4080
4081 phis = phi_nodes (l3_bb);
4082 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4083 {
4084 gimple phi = gsi_stmt (gsi);
4085 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4086 PHI_ARG_DEF_FROM_EDGE (phi, e));
4087 }
4088 remove_edge (e);
4089
4090 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4091 if (fd->collapse > 1)
4092 {
4093 e = find_edge (cont_bb, l1_bb);
4094 remove_edge (e);
4095 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4096 }
4097 else
4098 {
4099 e = find_edge (cont_bb, l1_bb);
4100 e->flags = EDGE_TRUE_VALUE;
4101 }
4102 e->probability = REG_BR_PROB_BASE * 7 / 8;
4103 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4104 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4105
4106 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4107 recompute_dominator (CDI_DOMINATORS, l2_bb));
4108 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4109 recompute_dominator (CDI_DOMINATORS, l3_bb));
4110 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4111 recompute_dominator (CDI_DOMINATORS, l0_bb));
4112 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4113 recompute_dominator (CDI_DOMINATORS, l1_bb));
4114 }
4115 }
4116
4117
4118 /* A subroutine of expand_omp_for. Generate code for a parallel
4119 loop with static schedule and no specified chunk size. Given
4120 parameters:
4121
4122 for (V = N1; V cond N2; V += STEP) BODY;
4123
4124 where COND is "<" or ">", we generate pseudocode
4125
4126 if (cond is <)
4127 adj = STEP - 1;
4128 else
4129 adj = STEP + 1;
4130 if ((__typeof (V)) -1 > 0 && cond is >)
4131 n = -(adj + N2 - N1) / -STEP;
4132 else
4133 n = (adj + N2 - N1) / STEP;
4134 q = n / nthreads;
4135 tt = n % nthreads;
4136 if (threadid < tt) goto L3; else goto L4;
4137 L3:
4138 tt = 0;
4139 q = q + 1;
4140 L4:
4141 s0 = q * threadid + tt;
4142 e0 = s0 + q;
4143 V = s0 * STEP + N1;
4144 if (s0 >= e0) goto L2; else goto L0;
4145 L0:
4146 e = e0 * STEP + N1;
4147 L1:
4148 BODY;
4149 V += STEP;
4150 if (V cond e) goto L1;
4151 L2:
4152 */
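/* A hypothetical worked example of the partitioning above: with
   n == 10 iterations and nthreads == 4 we get q == 2 and tt == 2, so
   threads 0 and 1 (threadid < tt) take q + 1 == 3 iterations each and
   threads 2 and 3 take 2:

       thread 0: s0 == 0, e0 == 3
       thread 1: s0 == 3, e0 == 6
       thread 2: s0 == 6, e0 == 8
       thread 3: s0 == 8, e0 == 10

   covering [0, 10) exactly once.  */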
4153
4154 static void
4155 expand_omp_for_static_nochunk (struct omp_region *region,
4156 struct omp_for_data *fd)
4157 {
4158 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
4159 tree type, itype, vmain, vback;
4160 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4161 basic_block body_bb, cont_bb;
4162 basic_block fin_bb;
4163 gimple_stmt_iterator gsi;
4164 gimple stmt;
4165 edge ep;
4166
4167 itype = type = TREE_TYPE (fd->loop.v);
4168 if (POINTER_TYPE_P (type))
4169 itype = signed_type_for (type);
4170
4171 entry_bb = region->entry;
4172 cont_bb = region->cont;
4173 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4174 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4175 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4176 body_bb = single_succ (seq_start_bb);
4177 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4178 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4179 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4180 exit_bb = region->exit;
4181
4182 /* Iteration space partitioning goes in ENTRY_BB. */
4183 gsi = gsi_last_bb (entry_bb);
4184 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4185
4186 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4187 t = fold_convert (itype, t);
4188 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4189 true, GSI_SAME_STMT);
4190
4191 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4192 t = fold_convert (itype, t);
4193 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4194 true, GSI_SAME_STMT);
4195
4196 fd->loop.n1
4197 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4198 true, NULL_TREE, true, GSI_SAME_STMT);
4199 fd->loop.n2
4200 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4201 true, NULL_TREE, true, GSI_SAME_STMT);
4202 fd->loop.step
4203 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4204 true, NULL_TREE, true, GSI_SAME_STMT);
4205
4206 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4207 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4208 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4209 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4210 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4211 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4212 fold_build1 (NEGATE_EXPR, itype, t),
4213 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4214 else
4215 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4216 t = fold_convert (itype, t);
4217 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4218
4219 q = create_tmp_reg (itype, "q");
4220 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4221 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4222 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4223
4224 tt = create_tmp_reg (itype, "tt");
4225 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4226 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4227 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
4228
4229 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4230 stmt = gimple_build_cond_empty (t);
4231 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4232
4233 second_bb = split_block (entry_bb, stmt)->dest;
4234 gsi = gsi_last_bb (second_bb);
4235 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4236
4237 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4238 GSI_SAME_STMT);
4239 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4240 build_int_cst (itype, 1));
4241 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4242
4243 third_bb = split_block (second_bb, stmt)->dest;
4244 gsi = gsi_last_bb (third_bb);
4245 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4246
4247 t = build2 (MULT_EXPR, itype, q, threadid);
4248 t = build2 (PLUS_EXPR, itype, t, tt);
4249 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4250
4251 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4252 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4253
4254 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4255 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4256
4257 /* Remove the GIMPLE_OMP_FOR statement. */
4258 gsi_remove (&gsi, true);
4259
4260 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4261 gsi = gsi_start_bb (seq_start_bb);
4262
4263 t = fold_convert (itype, s0);
4264 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4265 if (POINTER_TYPE_P (type))
4266 t = fold_build_pointer_plus (fd->loop.n1, t);
4267 else
4268 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4269 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4270 false, GSI_CONTINUE_LINKING);
4271 stmt = gimple_build_assign (fd->loop.v, t);
4272 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4273
4274 t = fold_convert (itype, e0);
4275 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4276 if (POINTER_TYPE_P (type))
4277 t = fold_build_pointer_plus (fd->loop.n1, t);
4278 else
4279 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4280 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4281 false, GSI_CONTINUE_LINKING);
4282
4283 /* The code controlling the sequential loop replaces the
4284 GIMPLE_OMP_CONTINUE. */
4285 gsi = gsi_last_bb (cont_bb);
4286 stmt = gsi_stmt (gsi);
4287 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4288 vmain = gimple_omp_continue_control_use (stmt);
4289 vback = gimple_omp_continue_control_def (stmt);
4290
4291 if (POINTER_TYPE_P (type))
4292 t = fold_build_pointer_plus (vmain, fd->loop.step);
4293 else
4294 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4295 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4296 true, GSI_SAME_STMT);
4297 stmt = gimple_build_assign (vback, t);
4298 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4299
4300 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4301 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4302
4303 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4304 gsi_remove (&gsi, true);
4305
4306 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4307 gsi = gsi_last_bb (exit_bb);
4308 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4309 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4310 false, GSI_SAME_STMT);
4311 gsi_remove (&gsi, true);
4312
4313 /* Connect all the blocks. */
4314 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
4315 ep->probability = REG_BR_PROB_BASE / 4 * 3;
4316 ep = find_edge (entry_bb, second_bb);
4317 ep->flags = EDGE_TRUE_VALUE;
4318 ep->probability = REG_BR_PROB_BASE / 4;
4319 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4320 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4321
4322 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4323 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4324
4325 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
4326 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
4327 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
4328 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4329 recompute_dominator (CDI_DOMINATORS, body_bb));
4330 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4331 recompute_dominator (CDI_DOMINATORS, fin_bb));
4332 }
4333
4334
4335 /* A subroutine of expand_omp_for. Generate code for a parallel
4336 loop with static schedule and a specified chunk size. Given
4337 parameters:
4338
4339 for (V = N1; V cond N2; V += STEP) BODY;
4340
4341 where COND is "<" or ">", we generate pseudocode
4342
4343 if (cond is <)
4344 adj = STEP - 1;
4345 else
4346 adj = STEP + 1;
4347 if ((__typeof (V)) -1 > 0 && cond is >)
4348 n = -(adj + N2 - N1) / -STEP;
4349 else
4350 n = (adj + N2 - N1) / STEP;
4351 trip = 0;
4352 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4353 here so that V is defined
4354 if the loop is not entered
4355 L0:
4356 s0 = (trip * nthreads + threadid) * CHUNK;
4357 e0 = min(s0 + CHUNK, n);
4358 if (s0 < n) goto L1; else goto L4;
4359 L1:
4360 V = s0 * STEP + N1;
4361 e = e0 * STEP + N1;
4362 L2:
4363 BODY;
4364 V += STEP;
4365 if (V cond e) goto L2; else goto L3;
4366 L3:
4367 trip += 1;
4368 goto L0;
4369 L4:
4370 */
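/* A hypothetical worked example of the chunked partitioning above:
   for schedule(static, 2) with n == 7 and nthreads == 2, chunks are
   handed out round-robin via s0 == (trip * nthreads + threadid) * CHUNK:

       thread 0: trip 0 -> [0, 2),  trip 1 -> [4, 6)
       thread 1: trip 0 -> [2, 4),  trip 1 -> [6, 7)

   after which s0 >= n for both threads and control reaches L4.  */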
4371
4372 static void
4373 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4374 {
4375 tree n, s0, e0, e, t;
4376 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4377 tree type, itype, v_main, v_back, v_extra;
4378 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4379 basic_block trip_update_bb, cont_bb, fin_bb;
4380 gimple_stmt_iterator si;
4381 gimple stmt;
4382 edge se;
4383
4384 itype = type = TREE_TYPE (fd->loop.v);
4385 if (POINTER_TYPE_P (type))
4386 itype = signed_type_for (type);
4387
4388 entry_bb = region->entry;
4389 se = split_block (entry_bb, last_stmt (entry_bb));
4390 entry_bb = se->src;
4391 iter_part_bb = se->dest;
4392 cont_bb = region->cont;
4393 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4394 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4395 == FALLTHRU_EDGE (cont_bb)->dest);
4396 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4397 body_bb = single_succ (seq_start_bb);
4398 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4399 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4400 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4401 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4402 exit_bb = region->exit;
4403
4404 /* Trip and adjustment setup goes in ENTRY_BB. */
4405 si = gsi_last_bb (entry_bb);
4406 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4407
4408 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4409 t = fold_convert (itype, t);
4410 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4411 true, GSI_SAME_STMT);
4412
4413 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4414 t = fold_convert (itype, t);
4415 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4416 true, GSI_SAME_STMT);
4417
4418 fd->loop.n1
4419 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4420 true, NULL_TREE, true, GSI_SAME_STMT);
4421 fd->loop.n2
4422 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4423 true, NULL_TREE, true, GSI_SAME_STMT);
4424 fd->loop.step
4425 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4426 true, NULL_TREE, true, GSI_SAME_STMT);
4427 fd->chunk_size
4428 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4429 true, NULL_TREE, true, GSI_SAME_STMT);
4430
4431 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4432 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4433 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4434 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4435 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4436 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4437 fold_build1 (NEGATE_EXPR, itype, t),
4438 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4439 else
4440 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4441 t = fold_convert (itype, t);
4442 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4443 true, GSI_SAME_STMT);
4444
4445 trip_var = create_tmp_reg (itype, ".trip");
4446 if (gimple_in_ssa_p (cfun))
4447 {
4448 trip_init = make_ssa_name (trip_var, NULL);
4449 trip_main = make_ssa_name (trip_var, NULL);
4450 trip_back = make_ssa_name (trip_var, NULL);
4451 }
4452 else
4453 {
4454 trip_init = trip_var;
4455 trip_main = trip_var;
4456 trip_back = trip_var;
4457 }
4458
4459 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4460 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4461
4462 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4463 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4464 if (POINTER_TYPE_P (type))
4465 t = fold_build_pointer_plus (fd->loop.n1, t);
4466 else
4467 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4468 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4469 true, GSI_SAME_STMT);
4470
4471 /* Remove the GIMPLE_OMP_FOR. */
4472 gsi_remove (&si, true);
4473
4474 /* Iteration space partitioning goes in ITER_PART_BB. */
4475 si = gsi_last_bb (iter_part_bb);
4476
4477 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4478 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4479 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4480 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4481 false, GSI_CONTINUE_LINKING);
4482
4483 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4484 t = fold_build2 (MIN_EXPR, itype, t, n);
4485 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4486 false, GSI_CONTINUE_LINKING);
4487
4488 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4489 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4490
4491 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4492 si = gsi_start_bb (seq_start_bb);
4493
4494 t = fold_convert (itype, s0);
4495 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4496 if (POINTER_TYPE_P (type))
4497 t = fold_build_pointer_plus (fd->loop.n1, t);
4498 else
4499 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4500 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4501 false, GSI_CONTINUE_LINKING);
4502 stmt = gimple_build_assign (fd->loop.v, t);
4503 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4504
4505 t = fold_convert (itype, e0);
4506 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4507 if (POINTER_TYPE_P (type))
4508 t = fold_build_pointer_plus (fd->loop.n1, t);
4509 else
4510 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4511 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4512 false, GSI_CONTINUE_LINKING);
4513
4514 /* The code controlling the sequential loop goes in CONT_BB,
4515 replacing the GIMPLE_OMP_CONTINUE. */
4516 si = gsi_last_bb (cont_bb);
4517 stmt = gsi_stmt (si);
4518 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4519 v_main = gimple_omp_continue_control_use (stmt);
4520 v_back = gimple_omp_continue_control_def (stmt);
4521
4522 if (POINTER_TYPE_P (type))
4523 t = fold_build_pointer_plus (v_main, fd->loop.step);
4524 else
4525 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4526 stmt = gimple_build_assign (v_back, t);
4527 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4528
4529 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4530 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4531
4532 /* Remove GIMPLE_OMP_CONTINUE. */
4533 gsi_remove (&si, true);
4534
4535 /* Trip update code goes into TRIP_UPDATE_BB. */
4536 si = gsi_start_bb (trip_update_bb);
4537
4538 t = build_int_cst (itype, 1);
4539 t = build2 (PLUS_EXPR, itype, trip_main, t);
4540 stmt = gimple_build_assign (trip_back, t);
4541 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4542
4543 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4544 si = gsi_last_bb (exit_bb);
4545 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4546 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4547 false, GSI_SAME_STMT);
4548 gsi_remove (&si, true);
4549
4550 /* Connect the new blocks. */
4551 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4552 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4553
4554 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4555 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4556
4557 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4558
4559 if (gimple_in_ssa_p (cfun))
4560 {
4561 gimple_stmt_iterator psi;
4562 gimple phi;
4563 edge re, ene;
4564 edge_var_map_vector *head;
4565 edge_var_map *vm;
4566 size_t i;
4567
4568 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4569 remove arguments of the phi nodes in fin_bb. We need to create
4570 appropriate phi nodes in iter_part_bb instead. */
4571 se = single_pred_edge (fin_bb);
4572 re = single_succ_edge (trip_update_bb);
4573 head = redirect_edge_var_map_vector (re);
4574 ene = single_succ_edge (entry_bb);
4575
4576 psi = gsi_start_phis (fin_bb);
4577 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
4578 gsi_next (&psi), ++i)
4579 {
4580 gimple nphi;
4581 source_location locus;
4582
4583 phi = gsi_stmt (psi);
4584 t = gimple_phi_result (phi);
4585 gcc_assert (t == redirect_edge_var_map_result (vm));
4586 nphi = create_phi_node (t, iter_part_bb);
4587
4588 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4589 locus = gimple_phi_arg_location_from_edge (phi, se);
4590
4591 /* A special case -- fd->loop.v is not yet computed in
4592 iter_part_bb, we need to use v_extra instead. */
4593 if (t == fd->loop.v)
4594 t = v_extra;
4595 add_phi_arg (nphi, t, ene, locus);
4596 locus = redirect_edge_var_map_location (vm);
4597 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4598 }
4599 gcc_assert (!gsi_end_p (psi) && i == head->length ());
4600 redirect_edge_var_map_clear (re);
4601 while (1)
4602 {
4603 psi = gsi_start_phis (fin_bb);
4604 if (gsi_end_p (psi))
4605 break;
4606 remove_phi_node (&psi, false);
4607 }
4608
4609 /* Make phi node for trip. */
4610 phi = create_phi_node (trip_main, iter_part_bb);
4611 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4612 UNKNOWN_LOCATION);
4613 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4614 UNKNOWN_LOCATION);
4615 }
4616
4617 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4618 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4619 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4620 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4621 recompute_dominator (CDI_DOMINATORS, fin_bb));
4622 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4623 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4624 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4625 recompute_dominator (CDI_DOMINATORS, body_bb));
4626 }
4627
4628
4629 /* Expand the OpenMP loop defined by REGION. */
4630
4631 static void
4632 expand_omp_for (struct omp_region *region)
4633 {
4634 struct omp_for_data fd;
4635 struct omp_for_data_loop *loops;
4636
4637 loops
4638 = (struct omp_for_data_loop *)
4639 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4640 * sizeof (struct omp_for_data_loop));
4641 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4642 region->sched_kind = fd.sched_kind;
4643
4644 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4645 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4646 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4647 if (region->cont)
4648 {
4649 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4650 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4651 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4652 }
4653
4654 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4655 && !fd.have_ordered
4656 && fd.collapse == 1
4657 && region->cont != NULL)
4658 {
4659 if (fd.chunk_size == NULL)
4660 expand_omp_for_static_nochunk (region, &fd);
4661 else
4662 expand_omp_for_static_chunk (region, &fd);
4663 }
4664 else
4665 {
4666 int fn_index, start_ix, next_ix;
4667
4668 if (fd.chunk_size == NULL
4669 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
4670 fd.chunk_size = integer_zero_node;
4671 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4672 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4673 ? 3 : fd.sched_kind;
4674 fn_index += fd.have_ordered * 4;
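/* Note: this offset arithmetic assumes the GOMP_loop_*_start and
   GOMP_loop_*_next builtins are declared consecutively, one per
   schedule kind in the same order as the schedule enum (with runtime
   mapped to index 3) and with the ordered variants four entries
   later; the adjustment below applies the analogous offset for the
   unsigned long long flavours.  */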
4675 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
4676 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
4677 if (fd.iter_type == long_long_unsigned_type_node)
4678 {
4679 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4680 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
4681 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4682 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
4683 }
4684 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4685 (enum built_in_function) next_ix);
4686 }
4687
4688 if (gimple_in_ssa_p (cfun))
4689 update_ssa (TODO_update_ssa_only_virtuals);
4690 }
4691
4692
4693 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4694
4695 v = GOMP_sections_start (n);
4696 L0:
4697 switch (v)
4698 {
4699 case 0:
4700 goto L2;
4701 case 1:
4702 section 1;
4703 goto L1;
4704 case 2:
4705 ...
4706 case n:
4707 ...
4708 default:
4709 abort ();
4710 }
4711 L1:
4712 v = GOMP_sections_next ();
4713 goto L0;
4714 L2:
4715 reduction;
4716
4717 If this is a combined parallel sections region, replace the call to
4718 GOMP_sections_start with a call to GOMP_sections_next. */
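/* A hypothetical example of the transformation: a directive such as

       #pragma omp sections
       {
       #pragma omp section
         foo ();
       #pragma omp section
         bar ();
       }

   produces a switch in which case 1 runs foo (), case 2 runs bar (),
   case 0 jumps past the construct and the default case traps.  */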
4719
4720 static void
4721 expand_omp_sections (struct omp_region *region)
4722 {
4723 tree t, u, vin = NULL, vmain, vnext, l2;
4724 vec<tree> label_vec;
4725 unsigned len;
4726 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4727 gimple_stmt_iterator si, switch_si;
4728 gimple sections_stmt, stmt, cont;
4729 edge_iterator ei;
4730 edge e;
4731 struct omp_region *inner;
4732 unsigned i, casei;
4733 bool exit_reachable = region->cont != NULL;
4734
4735 gcc_assert (region->exit != NULL);
4736 entry_bb = region->entry;
4737 l0_bb = single_succ (entry_bb);
4738 l1_bb = region->cont;
4739 l2_bb = region->exit;
4740 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4741 l2 = gimple_block_label (l2_bb);
4742 else
4743 {
4744 /* This can happen if there are reductions. */
4745 len = EDGE_COUNT (l0_bb->succs);
4746 gcc_assert (len > 0);
4747 e = EDGE_SUCC (l0_bb, len - 1);
4748 si = gsi_last_bb (e->dest);
4749 l2 = NULL_TREE;
4750 if (gsi_end_p (si)
4751 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4752 l2 = gimple_block_label (e->dest);
4753 else
4754 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4755 {
4756 si = gsi_last_bb (e->dest);
4757 if (gsi_end_p (si)
4758 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4759 {
4760 l2 = gimple_block_label (e->dest);
4761 break;
4762 }
4763 }
4764 }
4765 if (exit_reachable)
4766 default_bb = create_empty_bb (l1_bb->prev_bb);
4767 else
4768 default_bb = create_empty_bb (l0_bb);
4769
4770 /* We will build a switch() with enough cases for all the
4771 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
4772 and a default case to abort if something goes wrong. */
4773 len = EDGE_COUNT (l0_bb->succs);
4774
4775 /* Use vec::quick_push on label_vec throughout, since we know the size
4776 in advance. */
4777 label_vec.create (len);
4778
4779 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4780 GIMPLE_OMP_SECTIONS statement. */
4781 si = gsi_last_bb (entry_bb);
4782 sections_stmt = gsi_stmt (si);
4783 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4784 vin = gimple_omp_sections_control (sections_stmt);
4785 if (!is_combined_parallel (region))
4786 {
4787 /* If we are not inside a combined parallel+sections region,
4788 call GOMP_sections_start. */
4789 t = build_int_cst (unsigned_type_node,
4790 exit_reachable ? len - 1 : len);
4791 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
4792 stmt = gimple_build_call (u, 1, t);
4793 }
4794 else
4795 {
4796 /* Otherwise, call GOMP_sections_next. */
4797 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4798 stmt = gimple_build_call (u, 0);
4799 }
4800 gimple_call_set_lhs (stmt, vin);
4801 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4802 gsi_remove (&si, true);
4803
4804 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4805 L0_BB. */
4806 switch_si = gsi_last_bb (l0_bb);
4807 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4808 if (exit_reachable)
4809 {
4810 cont = last_stmt (l1_bb);
4811 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4812 vmain = gimple_omp_continue_control_use (cont);
4813 vnext = gimple_omp_continue_control_def (cont);
4814 }
4815 else
4816 {
4817 vmain = vin;
4818 vnext = NULL_TREE;
4819 }
4820
4821 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
4822 label_vec.quick_push (t);
4823 i = 1;
4824
4825 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4826 for (inner = region->inner, casei = 1;
4827 inner;
4828 inner = inner->next, i++, casei++)
4829 {
4830 basic_block s_entry_bb, s_exit_bb;
4831
4832 /* Skip optional reduction region. */
4833 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4834 {
4835 --i;
4836 --casei;
4837 continue;
4838 }
4839
4840 s_entry_bb = inner->entry;
4841 s_exit_bb = inner->exit;
4842
4843 t = gimple_block_label (s_entry_bb);
4844 u = build_int_cst (unsigned_type_node, casei);
4845 u = build_case_label (u, NULL, t);
4846 label_vec.quick_push (u);
4847
4848 si = gsi_last_bb (s_entry_bb);
4849 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4850 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4851 gsi_remove (&si, true);
4852 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4853
4854 if (s_exit_bb == NULL)
4855 continue;
4856
4857 si = gsi_last_bb (s_exit_bb);
4858 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4859 gsi_remove (&si, true);
4860
4861 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4862 }
4863
4864 /* Error handling code goes in DEFAULT_BB. */
4865 t = gimple_block_label (default_bb);
4866 u = build_case_label (NULL, NULL, t);
4867 make_edge (l0_bb, default_bb, 0);
4868
4869 stmt = gimple_build_switch (vmain, u, label_vec);
4870 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4871 gsi_remove (&switch_si, true);
4872 label_vec.release ();
4873
4874 si = gsi_start_bb (default_bb);
4875 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
4876 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4877
4878 if (exit_reachable)
4879 {
4880 tree bfn_decl;
4881
4882 /* Code to get the next section goes in L1_BB. */
4883 si = gsi_last_bb (l1_bb);
4884 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4885
4886 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4887 stmt = gimple_build_call (bfn_decl, 0);
4888 gimple_call_set_lhs (stmt, vnext);
4889 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4890 gsi_remove (&si, true);
4891
4892 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4893 }
4894
4895 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4896 si = gsi_last_bb (l2_bb);
4897 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4898 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
4899 else
4900 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
4901 stmt = gimple_build_call (t, 0);
4902 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4903 gsi_remove (&si, true);
4904
4905 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4906 }
4907
4908
4909 /* Expand code for an OpenMP single directive. We've already expanded
4910 much of the code; here we simply place the GOMP_barrier call. */
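/* A hypothetical example of when the barrier matters: for

       #pragma omp single copyprivate (x)
         x = init ();

   the copy-out of x to the other threads was generated during
   lowering, so the thread that executed the single body must not
   leave the construct before the others have copied x; see the
   comment on the terminal barrier below.  */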
4911
4912 static void
4913 expand_omp_single (struct omp_region *region)
4914 {
4915 basic_block entry_bb, exit_bb;
4916 gimple_stmt_iterator si;
4917 bool need_barrier = false;
4918
4919 entry_bb = region->entry;
4920 exit_bb = region->exit;
4921
4922 si = gsi_last_bb (entry_bb);
4923 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4924 be removed. We need to ensure that the thread that entered the single
4925 does not exit before the data is copied out by the other threads. */
4926 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4927 OMP_CLAUSE_COPYPRIVATE))
4928 need_barrier = true;
4929 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4930 gsi_remove (&si, true);
4931 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4932
4933 si = gsi_last_bb (exit_bb);
4934 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4935 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4936 false, GSI_SAME_STMT);
4937 gsi_remove (&si, true);
4938 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4939 }
4940
4941
4942 /* Generic expansion for OpenMP synchronization directives: master,
4943 ordered and critical. All we need to do here is remove the entry
4944 and exit markers for REGION. */
4945
4946 static void
4947 expand_omp_synch (struct omp_region *region)
4948 {
4949 basic_block entry_bb, exit_bb;
4950 gimple_stmt_iterator si;
4951
4952 entry_bb = region->entry;
4953 exit_bb = region->exit;
4954
4955 si = gsi_last_bb (entry_bb);
4956 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4957 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4958 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4959 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4960 gsi_remove (&si, true);
4961 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4962
4963 if (exit_bb)
4964 {
4965 si = gsi_last_bb (exit_bb);
4966 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4967 gsi_remove (&si, true);
4968 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4969 }
4970 }
4971
4972 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4973 operation as a normal volatile load. */
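/* A hypothetical example: for "#pragma omp atomic read" on an int,

       v = x;

   becomes roughly v = __atomic_load_4 (&x, MEMMODEL_RELAXED), with a
   VIEW_CONVERT_EXPR wrapped around the call when the builtin's return
   type does not match the type of the loaded value.  */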
4974
4975 static bool
4976 expand_omp_atomic_load (basic_block load_bb, tree addr,
4977 tree loaded_val, int index)
4978 {
4979 enum built_in_function tmpbase;
4980 gimple_stmt_iterator gsi;
4981 basic_block store_bb;
4982 location_t loc;
4983 gimple stmt;
4984 tree decl, call, type, itype;
4985
4986 gsi = gsi_last_bb (load_bb);
4987 stmt = gsi_stmt (gsi);
4988 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
4989 loc = gimple_location (stmt);
4990
4991 /* ??? If the target does not implement atomic_load_optab[mode], and mode
4992 is smaller than word size, then expand_atomic_load assumes that the load
4993 is atomic. We could avoid the builtin entirely in this case. */
4994
4995 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
4996 decl = builtin_decl_explicit (tmpbase);
4997 if (decl == NULL_TREE)
4998 return false;
4999
5000 type = TREE_TYPE (loaded_val);
5001 itype = TREE_TYPE (TREE_TYPE (decl));
5002
5003 call = build_call_expr_loc (loc, decl, 2, addr,
5004 build_int_cst (NULL, MEMMODEL_RELAXED));
5005 if (!useless_type_conversion_p (type, itype))
5006 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5007 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5008
5009 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5010 gsi_remove (&gsi, true);
5011
5012 store_bb = single_succ (load_bb);
5013 gsi = gsi_last_bb (store_bb);
5014 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5015 gsi_remove (&gsi, true);
5016
5017 if (gimple_in_ssa_p (cfun))
5018 update_ssa (TODO_update_ssa_no_phi);
5019
5020 return true;
5021 }
5022
5023 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5024 operation as a normal volatile store. */
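/* A hypothetical example: for "#pragma omp atomic write" on an int,

       x = expr;

   becomes roughly __atomic_store_4 (&x, expr, MEMMODEL_RELAXED); when
   the old value is also needed (an atomic capture), the same slot is
   expanded as __atomic_exchange_4 instead, via the "exchange" flag
   below.  */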
5025
5026 static bool
5027 expand_omp_atomic_store (basic_block load_bb, tree addr,
5028 tree loaded_val, tree stored_val, int index)
5029 {
5030 enum built_in_function tmpbase;
5031 gimple_stmt_iterator gsi;
5032 basic_block store_bb = single_succ (load_bb);
5033 location_t loc;
5034 gimple stmt;
5035 tree decl, call, type, itype;
5036 enum machine_mode imode;
5037 bool exchange;
5038
5039 gsi = gsi_last_bb (load_bb);
5040 stmt = gsi_stmt (gsi);
5041 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
5042
5043 /* If the load value is needed, then this isn't a store but an exchange. */
5044 exchange = gimple_omp_atomic_need_value_p (stmt);
5045
5046 gsi = gsi_last_bb (store_bb);
5047 stmt = gsi_stmt (gsi);
5048 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
5049 loc = gimple_location (stmt);
5050
5051 /* ??? If the target does not implement atomic_store_optab[mode], and mode
5052 is smaller than word size, then expand_atomic_store assumes that the store
5053 is atomic. We could avoid the builtin entirely in this case. */
5054
5055 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
5056 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
5057 decl = builtin_decl_explicit (tmpbase);
5058 if (decl == NULL_TREE)
5059 return false;
5060
5061 type = TREE_TYPE (stored_val);
5062
5063 /* Dig out the type of the function's second argument. */
5064 itype = TREE_TYPE (decl);
5065 itype = TYPE_ARG_TYPES (itype);
5066 itype = TREE_CHAIN (itype);
5067 itype = TREE_VALUE (itype);
5068 imode = TYPE_MODE (itype);
5069
5070 if (exchange && !can_atomic_exchange_p (imode, true))
5071 return false;
5072
5073 if (!useless_type_conversion_p (itype, type))
5074 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
5075 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
5076 build_int_cst (NULL, MEMMODEL_RELAXED));
5077 if (exchange)
5078 {
5079 if (!useless_type_conversion_p (type, itype))
5080 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5081 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5082 }
5083
5084 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5085 gsi_remove (&gsi, true);
5086
5087 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
5088 gsi = gsi_last_bb (load_bb);
5089 gsi_remove (&gsi, true);
5090
5091 if (gimple_in_ssa_p (cfun))
5092 update_ssa (TODO_update_ssa_no_phi);
5093
5094 return true;
5095 }
5096
5097 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5098 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
5099 size of the data type, and thus usable to find the index of the builtin
5100 decl. Returns false if the expression is not of the proper form. */
5101
5102 static bool
5103 expand_omp_atomic_fetch_op (basic_block load_bb,
5104 tree addr, tree loaded_val,
5105 tree stored_val, int index)
5106 {
5107 enum built_in_function oldbase, newbase, tmpbase;
5108 tree decl, itype, call;
5109 tree lhs, rhs;
5110 basic_block store_bb = single_succ (load_bb);
5111 gimple_stmt_iterator gsi;
5112 gimple stmt;
5113 location_t loc;
5114 enum tree_code code;
5115 bool need_old, need_new;
5116 enum machine_mode imode;
5117
5118 /* We expect to find the following sequences:
5119
5120 load_bb:
5121 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
5122
5123 store_bb:
5124 val = tmp OP something; (or: something OP tmp)
5125 GIMPLE_OMP_STORE (val)
5126
5127 ???FIXME: Allow a more flexible sequence.
5128 Perhaps use data flow to pick the statements.
5129
5130 */
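/* A hypothetical example matching the expected sequence: for

       #pragma omp atomic
       x = x + 1;

   load_bb ends with GIMPLE_OMP_ATOMIC_LOAD (tmp, &x) and store_bb
   holds val = tmp + 1 followed by GIMPLE_OMP_ATOMIC_STORE (val),
   which the PLUS_EXPR case below maps onto an __atomic_fetch_add
   builtin.  */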
5131
5132 gsi = gsi_after_labels (store_bb);
5133 stmt = gsi_stmt (gsi);
5134 loc = gimple_location (stmt);
5135 if (!is_gimple_assign (stmt))
5136 return false;
5137 gsi_next (&gsi);
5138 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
5139 return false;
5140 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
5141 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
5142 gcc_checking_assert (!need_old || !need_new);
5143
5144 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
5145 return false;
5146
5147 /* Check for one of the supported fetch-op operations. */
5148 code = gimple_assign_rhs_code (stmt);
5149 switch (code)
5150 {
5151 case PLUS_EXPR:
5152 case POINTER_PLUS_EXPR:
5153 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
5154 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
5155 break;
5156 case MINUS_EXPR:
5157 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
5158 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
5159 break;
5160 case BIT_AND_EXPR:
5161 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
5162 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
5163 break;
5164 case BIT_IOR_EXPR:
5165 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
5166 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
5167 break;
5168 case BIT_XOR_EXPR:
5169 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
5170 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
5171 break;
5172 default:
5173 return false;
5174 }
5175
5176 /* Make sure the expression is of the proper form. */
5177 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5178 rhs = gimple_assign_rhs2 (stmt);
5179 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5180 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5181 rhs = gimple_assign_rhs1 (stmt);
5182 else
5183 return false;
5184
5185 tmpbase = ((enum built_in_function)
5186 ((need_new ? newbase : oldbase) + index + 1));
5187 decl = builtin_decl_explicit (tmpbase);
5188 if (decl == NULL_TREE)
5189 return false;
5190 itype = TREE_TYPE (TREE_TYPE (decl));
5191 imode = TYPE_MODE (itype);
5192
5193 /* We could test all of the various optabs involved, but the fact of the
5194 matter is that (with the exception of i486 vs i586 and xadd) all targets
5195 that support any atomic operation optab also implement compare-and-swap.
5196 Let optabs.c take care of expanding any compare-and-swap loop. */
5197 if (!can_compare_and_swap_p (imode, true))
5198 return false;
5199
5200 gsi = gsi_last_bb (load_bb);
5201 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5202
5203 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
5204 It only requires that the operation happen atomically. Thus we can
5205 use the RELAXED memory model. */
5206 call = build_call_expr_loc (loc, decl, 3, addr,
5207 fold_convert_loc (loc, itype, rhs),
5208 build_int_cst (NULL, MEMMODEL_RELAXED));
5209
5210 if (need_old || need_new)
5211 {
5212 lhs = need_old ? loaded_val : stored_val;
5213 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
5214 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
5215 }
5216 else
5217 call = fold_convert_loc (loc, void_type_node, call);
5218 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5219 gsi_remove (&gsi, true);
5220
5221 gsi = gsi_last_bb (store_bb);
5222 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5223 gsi_remove (&gsi, true);
5224 gsi = gsi_last_bb (store_bb);
5225 gsi_remove (&gsi, true);
5226
5227 if (gimple_in_ssa_p (cfun))
5228 update_ssa (TODO_update_ssa_no_phi);
5229
5230 return true;
5231 }
5232
5233 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5234
5235 oldval = *addr;
5236 repeat:
5237 newval = rhs; // with oldval replacing *addr in rhs
5238 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5239 if (oldval != newval)
5240 goto repeat;
5241
5242 INDEX is log2 of the size of the data type, and thus usable to find the
5243 index of the builtin decl. */
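/* A hypothetical example: for a float x and

       #pragma omp atomic
       x = x * 2.0f;

   there is no fetch-and-multiply builtin, so the value is
   view-converted to a 32-bit integer and the loop above keeps
   retrying __sync_val_compare_and_swap_4 until the exchange
   succeeds.  */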
5244
5245 static bool
5246 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5247 tree addr, tree loaded_val, tree stored_val,
5248 int index)
5249 {
5250 tree loadedi, storedi, initial, new_storedi, old_vali;
5251 tree type, itype, cmpxchg, iaddr;
5252 gimple_stmt_iterator si;
5253 basic_block loop_header = single_succ (load_bb);
5254 gimple phi, stmt;
5255 edge e;
5256 enum built_in_function fncode;
5257
5258 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
5259 order to use the RELAXED memory model effectively. */
5260 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
5261 + index + 1);
5262 cmpxchg = builtin_decl_explicit (fncode);
5263 if (cmpxchg == NULL_TREE)
5264 return false;
5265 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5266 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5267
5268 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
5269 return false;
5270
5271 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5272 si = gsi_last_bb (load_bb);
5273 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5274
5275 /* For floating-point values, we'll need to view-convert them to integers
5276 so that we can perform the atomic compare and swap. Simplify the
5277 following code by always setting up the "i"ntegral variables. */
5278 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5279 {
5280 tree iaddr_val;
5281
5282 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
5283 true), NULL);
5284 iaddr_val
5285 = force_gimple_operand_gsi (&si,
5286 fold_convert (TREE_TYPE (iaddr), addr),
5287 false, NULL_TREE, true, GSI_SAME_STMT);
5288 stmt = gimple_build_assign (iaddr, iaddr_val);
5289 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5290 loadedi = create_tmp_var (itype, NULL);
5291 if (gimple_in_ssa_p (cfun))
5292 loadedi = make_ssa_name (loadedi, NULL);
5293 }
5294 else
5295 {
5296 iaddr = addr;
5297 loadedi = loaded_val;
5298 }
5299
5300 initial
5301 = force_gimple_operand_gsi (&si,
5302 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5303 iaddr,
5304 build_int_cst (TREE_TYPE (iaddr), 0)),
5305 true, NULL_TREE, true, GSI_SAME_STMT);
5306
5307 /* Move the value to the LOADEDI temporary. */
5308 if (gimple_in_ssa_p (cfun))
5309 {
5310 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5311 phi = create_phi_node (loadedi, loop_header);
5312 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5313 initial);
5314 }
5315 else
5316 gsi_insert_before (&si,
5317 gimple_build_assign (loadedi, initial),
5318 GSI_SAME_STMT);
5319 if (loadedi != loaded_val)
5320 {
5321 gimple_stmt_iterator gsi2;
5322 tree x;
5323
5324 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5325 gsi2 = gsi_start_bb (loop_header);
5326 if (gimple_in_ssa_p (cfun))
5327 {
5328 gimple stmt;
5329 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5330 true, GSI_SAME_STMT);
5331 stmt = gimple_build_assign (loaded_val, x);
5332 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5333 }
5334 else
5335 {
5336 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5337 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5338 true, GSI_SAME_STMT);
5339 }
5340 }
5341 gsi_remove (&si, true);
5342
5343 si = gsi_last_bb (store_bb);
5344 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5345
5346 if (iaddr == addr)
5347 storedi = stored_val;
5348 else
5349 storedi =
5350 force_gimple_operand_gsi (&si,
5351 build1 (VIEW_CONVERT_EXPR, itype,
5352 stored_val), true, NULL_TREE, true,
5353 GSI_SAME_STMT);
5354
5355 /* Build the compare&swap statement. */
5356 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5357 new_storedi = force_gimple_operand_gsi (&si,
5358 fold_convert (TREE_TYPE (loadedi),
5359 new_storedi),
5360 true, NULL_TREE,
5361 true, GSI_SAME_STMT);
5362
5363 if (gimple_in_ssa_p (cfun))
5364 old_vali = loadedi;
5365 else
5366 {
5367 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5368 stmt = gimple_build_assign (old_vali, loadedi);
5369 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5370
5371 stmt = gimple_build_assign (loadedi, new_storedi);
5372 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5373 }
5374
5375 /* Note that we always perform the comparison as an integer, even for
5376 floating point. This allows the atomic operation to properly
5377 succeed even with NaNs and -0.0. */
5378 stmt = gimple_build_cond_empty
5379 (build2 (NE_EXPR, boolean_type_node,
5380 new_storedi, old_vali));
5381 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5382
5383 /* Update cfg. */
5384 e = single_succ_edge (store_bb);
5385 e->flags &= ~EDGE_FALLTHRU;
5386 e->flags |= EDGE_FALSE_VALUE;
5387
5388 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5389
5390 /* Copy the new value to loadedi (we already did that before the condition
5391 if we are not in SSA). */
5392 if (gimple_in_ssa_p (cfun))
5393 {
5394 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5395 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5396 }
5397
5398 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5399 gsi_remove (&si, true);
5400
5401 if (gimple_in_ssa_p (cfun))
5402 update_ssa (TODO_update_ssa_no_phi);
5403
5404 return true;
5405 }
5406
5407 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5408
5409 GOMP_atomic_start ();
5410 *addr = rhs;
5411 GOMP_atomic_end ();
5412
5413 The result is not globally atomic, but works so long as all parallel
5414 references are within #pragma omp atomic directives. According to
5415 responses received from omp@openmp.org, this appears to be within spec.
5416 That makes sense, since this is how several other compilers handle
5417 this situation as well.
5418 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5419 expanding. STORED_VAL is the operand of the matching
5420 GIMPLE_OMP_ATOMIC_STORE.
5421
5422 We replace
5423 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5424 loaded_val = *addr;
5425
5426 and replace
5427 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5428 *addr = stored_val;
5429 */
5430
5431 static bool
5432 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5433 tree addr, tree loaded_val, tree stored_val)
5434 {
5435 gimple_stmt_iterator si;
5436 gimple stmt;
5437 tree t;
5438
5439 si = gsi_last_bb (load_bb);
5440 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5441
5442 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
5443 t = build_call_expr (t, 0);
5444 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5445
5446 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5447 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5448 gsi_remove (&si, true);
5449
5450 si = gsi_last_bb (store_bb);
5451 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5452
5453 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5454 stored_val);
5455 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5456
5457 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
5458 t = build_call_expr (t, 0);
5459 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5460 gsi_remove (&si, true);
5461
5462 if (gimple_in_ssa_p (cfun))
5463 update_ssa (TODO_update_ssa_no_phi);
5464 return true;
5465 }
5466
5467 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
5468 using expand_omp_atomic_fetch_op. If that fails, we try
5469 expand_omp_atomic_pipeline, and if that fails too, the
5470 ultimate fallback is wrapping the operation in a mutex
5471 (expand_omp_atomic_mutex). REGION is the atomic region built
5472 by build_omp_regions_1(). */
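/* As a rough illustration (a sketch, not literal compiler output), an
   update such as

     #pragma omp atomic
     x += 1;

   is preferably expanded to a single __sync/__atomic fetch-and-add style
   builtin; failing that, to a compare-and-swap loop; and, as the last
   resort, to a read-modify-write protected by GOMP_atomic_start () and
   GOMP_atomic_end ().  */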
5473
5474 static void
5475 expand_omp_atomic (struct omp_region *region)
5476 {
5477 basic_block load_bb = region->entry, store_bb = region->exit;
5478 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5479 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5480 tree addr = gimple_omp_atomic_load_rhs (load);
5481 tree stored_val = gimple_omp_atomic_store_val (store);
5482 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5483 HOST_WIDE_INT index;
5484
5485 /* Make sure the type is one of the supported sizes. */
5486 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5487 index = exact_log2 (index);
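  /* INDEX is now the log2 of the operand size in bytes; the 0..4 range
     checked below corresponds to the 1-, 2-, 4-, 8- and 16-byte
     accesses that the __sync builtins can handle.  */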
5488 if (index >= 0 && index <= 4)
5489 {
5490 unsigned int align = TYPE_ALIGN_UNIT (type);
5491
5492 /* __sync builtins require strict data alignment. */
5493 if (exact_log2 (align) >= index)
5494 {
5495 /* Atomic load. */
5496 if (loaded_val == stored_val
5497 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5498 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5499 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5500 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
5501 return;
5502
5503 /* Atomic store. */
5504 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5505 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5506 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5507 && store_bb == single_succ (load_bb)
5508 && first_stmt (store_bb) == store
5509 && expand_omp_atomic_store (load_bb, addr, loaded_val,
5510 stored_val, index))
5511 return;
5512
5513 /* When possible, use specialized atomic update functions. */
5514 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5515 && store_bb == single_succ (load_bb)
5516 && expand_omp_atomic_fetch_op (load_bb, addr,
5517 loaded_val, stored_val, index))
5518 return;
5519
5520 /* If we don't have specialized __sync builtins, try to implement
5521 it as a compare-and-swap loop. */
5522 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5523 loaded_val, stored_val, index))
5524 return;
5525 }
5526 }
5527
5528 /* The ultimate fallback is wrapping the operation in a mutex. */
5529 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5530 }
5531
5532
5533 /* Expand the parallel region tree rooted at REGION. Expansion
5534 proceeds in depth-first order. Innermost regions are expanded
5535 first. This way, parallel regions that require a new function to
5536 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5537 internal dependencies in their body. */
5538
5539 static void
5540 expand_omp (struct omp_region *region)
5541 {
5542 while (region)
5543 {
5544 location_t saved_location;
5545
5546 /* First, determine whether this is a combined parallel+workshare
5547 region. */
5548 if (region->type == GIMPLE_OMP_PARALLEL)
5549 determine_parallel_type (region);
5550
5551 if (region->inner)
5552 expand_omp (region->inner);
5553
5554 saved_location = input_location;
5555 if (gimple_has_location (last_stmt (region->entry)))
5556 input_location = gimple_location (last_stmt (region->entry));
5557
5558 switch (region->type)
5559 {
5560 case GIMPLE_OMP_PARALLEL:
5561 case GIMPLE_OMP_TASK:
5562 expand_omp_taskreg (region);
5563 break;
5564
5565 case GIMPLE_OMP_FOR:
5566 expand_omp_for (region);
5567 break;
5568
5569 case GIMPLE_OMP_SECTIONS:
5570 expand_omp_sections (region);
5571 break;
5572
5573 case GIMPLE_OMP_SECTION:
5574 /* Individual omp sections are handled together with their
5575 parent GIMPLE_OMP_SECTIONS region. */
5576 break;
5577
5578 case GIMPLE_OMP_SINGLE:
5579 expand_omp_single (region);
5580 break;
5581
5582 case GIMPLE_OMP_MASTER:
5583 case GIMPLE_OMP_ORDERED:
5584 case GIMPLE_OMP_CRITICAL:
5585 expand_omp_synch (region);
5586 break;
5587
5588 case GIMPLE_OMP_ATOMIC_LOAD:
5589 expand_omp_atomic (region);
5590 break;
5591
5592 default:
5593 gcc_unreachable ();
5594 }
5595
5596 input_location = saved_location;
5597 region = region->next;
5598 }
5599 }
5600
5601
5602 /* Helper for build_omp_regions. Scan the dominator tree starting at
5603 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5604 true, the function ends once a single tree is built (otherwise, a whole
5605 forest of OMP constructs may be built). */
5606
5607 static void
5608 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5609 bool single_tree)
5610 {
5611 gimple_stmt_iterator gsi;
5612 gimple stmt;
5613 basic_block son;
5614
5615 gsi = gsi_last_bb (bb);
5616 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5617 {
5618 struct omp_region *region;
5619 enum gimple_code code;
5620
5621 stmt = gsi_stmt (gsi);
5622 code = gimple_code (stmt);
5623 if (code == GIMPLE_OMP_RETURN)
5624 {
5625 /* STMT is the return point out of region PARENT. Mark it
5626 as the exit point and make PARENT the immediately
5627 enclosing region. */
5628 gcc_assert (parent);
5629 region = parent;
5630 region->exit = bb;
5631 parent = parent->outer;
5632 }
5633 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5634 {
5635 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5636 GIMPLE_OMP_RETURN, but matches with
5637 GIMPLE_OMP_ATOMIC_LOAD. */
5638 gcc_assert (parent);
5639 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5640 region = parent;
5641 region->exit = bb;
5642 parent = parent->outer;
5643 }
5644
5645 else if (code == GIMPLE_OMP_CONTINUE)
5646 {
5647 gcc_assert (parent);
5648 parent->cont = bb;
5649 }
5650 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5651 {
5652 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5653 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5654 ;
5655 }
5656 else
5657 {
5658 /* Otherwise, this directive becomes the parent for a new
5659 region. */
5660 region = new_omp_region (bb, code, parent);
5661 parent = region;
5662 }
5663 }
5664
5665 if (single_tree && !parent)
5666 return;
5667
5668 for (son = first_dom_son (CDI_DOMINATORS, bb);
5669 son;
5670 son = next_dom_son (CDI_DOMINATORS, son))
5671 build_omp_regions_1 (son, parent, single_tree);
5672 }
5673
5674 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5675 root_omp_region. */
5676
5677 static void
5678 build_omp_regions_root (basic_block root)
5679 {
5680 gcc_assert (root_omp_region == NULL);
5681 build_omp_regions_1 (root, NULL, true);
5682 gcc_assert (root_omp_region != NULL);
5683 }
5684
5685 /* Expands the OMP construct (and its subconstructs) starting in HEAD. */
5686
5687 void
5688 omp_expand_local (basic_block head)
5689 {
5690 build_omp_regions_root (head);
5691 if (dump_file && (dump_flags & TDF_DETAILS))
5692 {
5693 fprintf (dump_file, "\nOMP region tree\n\n");
5694 dump_omp_region (dump_file, root_omp_region, 0);
5695 fprintf (dump_file, "\n");
5696 }
5697
5698 remove_exit_barriers (root_omp_region);
5699 expand_omp (root_omp_region);
5700
5701 free_omp_regions ();
5702 }
5703
5704 /* Scan the CFG and build a tree of OMP regions, storing the root of
5705 the tree in root_omp_region. */
5706
5707 static void
5708 build_omp_regions (void)
5709 {
5710 gcc_assert (root_omp_region == NULL);
5711 calculate_dominance_info (CDI_DOMINATORS);
5712 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5713 }
5714
5715 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5716
5717 static unsigned int
5718 execute_expand_omp (void)
5719 {
5720 build_omp_regions ();
5721
5722 if (!root_omp_region)
5723 return 0;
5724
5725 if (dump_file)
5726 {
5727 fprintf (dump_file, "\nOMP region tree\n\n");
5728 dump_omp_region (dump_file, root_omp_region, 0);
5729 fprintf (dump_file, "\n");
5730 }
5731
5732 remove_exit_barriers (root_omp_region);
5733
5734 expand_omp (root_omp_region);
5735
5736 cleanup_tree_cfg ();
5737
5738 free_omp_regions ();
5739
5740 return 0;
5741 }
5742
5743 /* OMP expansion -- the default pass, run before creation of SSA form. */
5744
5745 static bool
5746 gate_expand_omp (void)
5747 {
5748 return (flag_openmp != 0 && !seen_error ());
5749 }
5750
5751 struct gimple_opt_pass pass_expand_omp =
5752 {
5753 {
5754 GIMPLE_PASS,
5755 "ompexp", /* name */
5756 OPTGROUP_NONE, /* optinfo_flags */
5757 gate_expand_omp, /* gate */
5758 execute_expand_omp, /* execute */
5759 NULL, /* sub */
5760 NULL, /* next */
5761 0, /* static_pass_number */
5762 TV_NONE, /* tv_id */
5763 PROP_gimple_any, /* properties_required */
5764 0, /* properties_provided */
5765 0, /* properties_destroyed */
5766 0, /* todo_flags_start */
5767 0 /* todo_flags_finish */
5768 }
5769 };
5770 \f
5771 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5772
5773 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5774 CTX is the enclosing OMP context for the current statement. */
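/* Roughly speaking (a sketch of the resulting GIMPLE, not literal
   output), the directive is rewritten as

     <input/firstprivate setup>
     GIMPLE_OMP_SECTIONS <clauses, control variable .section>
     GIMPLE_OMP_SECTIONS_SWITCH
     <bind holding the lowered sections, each terminated by an OMP return>
     GIMPLE_OMP_CONTINUE (.section, .section)
     <reduction and destructor code>
     GIMPLE_OMP_RETURN (nowait?)

   with the lastprivate code appended to the last section.  */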
5775
5776 static void
5777 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5778 {
5779 tree block, control;
5780 gimple_stmt_iterator tgsi;
5781 gimple stmt, new_stmt, bind, t;
5782 gimple_seq ilist, dlist, olist, new_body;
5783 struct gimplify_ctx gctx;
5784
5785 stmt = gsi_stmt (*gsi_p);
5786
5787 push_gimplify_context (&gctx);
5788
5789 dlist = NULL;
5790 ilist = NULL;
5791 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5792 &ilist, &dlist, ctx);
5793
5794 new_body = gimple_omp_body (stmt);
5795 gimple_omp_set_body (stmt, NULL);
5796 tgsi = gsi_start (new_body);
5797 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
5798 {
5799 omp_context *sctx;
5800 gimple sec_start;
5801
5802 sec_start = gsi_stmt (tgsi);
5803 sctx = maybe_lookup_ctx (sec_start);
5804 gcc_assert (sctx);
5805
5806 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
5807 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
5808 GSI_CONTINUE_LINKING);
5809 gimple_omp_set_body (sec_start, NULL);
5810
5811 if (gsi_one_before_end_p (tgsi))
5812 {
5813 gimple_seq l = NULL;
5814 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5815 &l, ctx);
5816 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
5817 gimple_omp_section_set_last (sec_start);
5818 }
5819
5820 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
5821 GSI_CONTINUE_LINKING);
5822 }
5823
5824 block = make_node (BLOCK);
5825 bind = gimple_build_bind (NULL, new_body, block);
5826
5827 olist = NULL;
5828 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5829
5830 block = make_node (BLOCK);
5831 new_stmt = gimple_build_bind (NULL, NULL, block);
5832 gsi_replace (gsi_p, new_stmt, true);
5833
5834 pop_gimplify_context (new_stmt);
5835 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5836 BLOCK_VARS (block) = gimple_bind_vars (bind);
5837 if (BLOCK_VARS (block))
5838 TREE_USED (block) = 1;
5839
5840 new_body = NULL;
5841 gimple_seq_add_seq (&new_body, ilist);
5842 gimple_seq_add_stmt (&new_body, stmt);
5843 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5844 gimple_seq_add_stmt (&new_body, bind);
5845
5846 control = create_tmp_var (unsigned_type_node, ".section");
5847 t = gimple_build_omp_continue (control, control);
5848 gimple_omp_sections_set_control (stmt, control);
5849 gimple_seq_add_stmt (&new_body, t);
5850
5851 gimple_seq_add_seq (&new_body, olist);
5852 gimple_seq_add_seq (&new_body, dlist);
5853
5854 new_body = maybe_catch_exception (new_body);
5855
5856 t = gimple_build_omp_return
5857 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5858 OMP_CLAUSE_NOWAIT));
5859 gimple_seq_add_stmt (&new_body, t);
5860
5861 gimple_bind_set_body (new_stmt, new_body);
5862 }
5863
5864
5865 /* A subroutine of lower_omp_single. Expand the simple form of
5866 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5867
5868 if (GOMP_single_start ())
5869 BODY;
5870 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5871
5872 FIXME. It may be better to delay expanding the logic of this until
5873 pass_expand_omp. The expanded logic may make the job more difficult
5874 for a synchronization analysis pass. */
5875
5876 static void
5877 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5878 {
5879 location_t loc = gimple_location (single_stmt);
5880 tree tlabel = create_artificial_label (loc);
5881 tree flabel = create_artificial_label (loc);
5882 gimple call, cond;
5883 tree lhs, decl;
5884
5885 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
5886 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5887 call = gimple_build_call (decl, 0);
5888 gimple_call_set_lhs (call, lhs);
5889 gimple_seq_add_stmt (pre_p, call);
5890
5891 cond = gimple_build_cond (EQ_EXPR, lhs,
5892 fold_convert_loc (loc, TREE_TYPE (lhs),
5893 boolean_true_node),
5894 tlabel, flabel);
5895 gimple_seq_add_stmt (pre_p, cond);
5896 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5897 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5898 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5899 }
5900
5901
5902 /* A subroutine of lower_omp_single. Expand the copyprivate form of
5903 a GIMPLE_OMP_SINGLE, i.e. one with a copyprivate clause:
5904
5905 #pragma omp single copyprivate (a, b, c)
5906
5907 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5908
5909 {
5910 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5911 {
5912 BODY;
5913 copyout.a = a;
5914 copyout.b = b;
5915 copyout.c = c;
5916 GOMP_single_copy_end (&copyout);
5917 }
5918 else
5919 {
5920 a = copyout_p->a;
5921 b = copyout_p->b;
5922 c = copyout_p->c;
5923 }
5924 GOMP_barrier ();
5925 }
5926
5927 FIXME. It may be better to delay expanding the logic of this until
5928 pass_expand_omp. The expanded logic may make the job more difficult
5929 for a synchronization analysis pass. */
5930
5931 static void
5932 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5933 {
5934 tree ptr_type, t, l0, l1, l2, bfn_decl;
5935 gimple_seq copyin_seq;
5936 location_t loc = gimple_location (single_stmt);
5937
5938 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5939
5940 ptr_type = build_pointer_type (ctx->record_type);
5941 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5942
5943 l0 = create_artificial_label (loc);
5944 l1 = create_artificial_label (loc);
5945 l2 = create_artificial_label (loc);
5946
5947 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
5948 t = build_call_expr_loc (loc, bfn_decl, 0);
5949 t = fold_convert_loc (loc, ptr_type, t);
5950 gimplify_assign (ctx->receiver_decl, t, pre_p);
5951
5952 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5953 build_int_cst (ptr_type, 0));
5954 t = build3 (COND_EXPR, void_type_node, t,
5955 build_and_jump (&l0), build_and_jump (&l1));
5956 gimplify_and_add (t, pre_p);
5957
5958 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5959
5960 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5961
5962 copyin_seq = NULL;
5963 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5964 &copyin_seq, ctx);
5965
5966 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5967 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
5968 t = build_call_expr_loc (loc, bfn_decl, 1, t);
5969 gimplify_and_add (t, pre_p);
5970
5971 t = build_and_jump (&l2);
5972 gimplify_and_add (t, pre_p);
5973
5974 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5975
5976 gimple_seq_add_seq (pre_p, copyin_seq);
5977
5978 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5979 }
5980
5981
5982 /* Expand code for an OpenMP single directive. */
5983
5984 static void
5985 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5986 {
5987 tree block;
5988 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5989 gimple_seq bind_body, dlist;
5990 struct gimplify_ctx gctx;
5991
5992 push_gimplify_context (&gctx);
5993
5994 block = make_node (BLOCK);
5995 bind = gimple_build_bind (NULL, NULL, block);
5996 gsi_replace (gsi_p, bind, true);
5997 bind_body = NULL;
5998 dlist = NULL;
5999 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
6000 &bind_body, &dlist, ctx);
6001 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
6002
6003 gimple_seq_add_stmt (&bind_body, single_stmt);
6004
6005 if (ctx->record_type)
6006 lower_omp_single_copy (single_stmt, &bind_body, ctx);
6007 else
6008 lower_omp_single_simple (single_stmt, &bind_body);
6009
6010 gimple_omp_set_body (single_stmt, NULL);
6011
6012 gimple_seq_add_seq (&bind_body, dlist);
6013
6014 bind_body = maybe_catch_exception (bind_body);
6015
6016 t = gimple_build_omp_return
6017 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
6018 OMP_CLAUSE_NOWAIT));
6019 gimple_seq_add_stmt (&bind_body, t);
6020 gimple_bind_set_body (bind, bind_body);
6021
6022 pop_gimplify_context (bind);
6023
6024 gimple_bind_append_vars (bind, ctx->block_vars);
6025 BLOCK_VARS (block) = ctx->block_vars;
6026 if (BLOCK_VARS (block))
6027 TREE_USED (block) = 1;
6028 }
6029
6030
6031 /* Expand code for an OpenMP master directive. */
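/* A sketch of the generated form (not literal compiler output):

     if (omp_get_thread_num () != 0) goto lab;
     BODY;
   lab:
     GIMPLE_OMP_RETURN (nowait);  */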
6032
6033 static void
6034 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6035 {
6036 tree block, lab = NULL, x, bfn_decl;
6037 gimple stmt = gsi_stmt (*gsi_p), bind;
6038 location_t loc = gimple_location (stmt);
6039 gimple_seq tseq;
6040 struct gimplify_ctx gctx;
6041
6042 push_gimplify_context (&gctx);
6043
6044 block = make_node (BLOCK);
6045 bind = gimple_build_bind (NULL, NULL, block);
6046 gsi_replace (gsi_p, bind, true);
6047 gimple_bind_add_stmt (bind, stmt);
6048
6049 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
6050 x = build_call_expr_loc (loc, bfn_decl, 0);
6051 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
6052 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
6053 tseq = NULL;
6054 gimplify_and_add (x, &tseq);
6055 gimple_bind_add_seq (bind, tseq);
6056
6057 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6058 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6059 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6060 gimple_omp_set_body (stmt, NULL);
6061
6062 gimple_bind_add_stmt (bind, gimple_build_label (lab));
6063
6064 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6065
6066 pop_gimplify_context (bind);
6067
6068 gimple_bind_append_vars (bind, ctx->block_vars);
6069 BLOCK_VARS (block) = ctx->block_vars;
6070 }
6071
6072
6073 /* Expand code for an OpenMP ordered directive. */
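/* A sketch of the generated form (not literal compiler output):

     GOMP_ordered_start ();
     BODY;
     GOMP_ordered_end ();
     GIMPLE_OMP_RETURN (nowait);  */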
6074
6075 static void
6076 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6077 {
6078 tree block;
6079 gimple stmt = gsi_stmt (*gsi_p), bind, x;
6080 struct gimplify_ctx gctx;
6081
6082 push_gimplify_context (&gctx);
6083
6084 block = make_node (BLOCK);
6085 bind = gimple_build_bind (NULL, NULL, block);
6086 gsi_replace (gsi_p, bind, true);
6087 gimple_bind_add_stmt (bind, stmt);
6088
6089 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
6090 0);
6091 gimple_bind_add_stmt (bind, x);
6092
6093 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6094 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6095 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6096 gimple_omp_set_body (stmt, NULL);
6097
6098 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
6099 gimple_bind_add_stmt (bind, x);
6100
6101 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6102
6103 pop_gimplify_context (bind);
6104
6105 gimple_bind_append_vars (bind, ctx->block_vars);
6106 BLOCK_VARS (block) = gimple_bind_vars (bind);
6107 }
6108
6109
6110 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
6111 substitution of a couple of function calls. But in the NAMED case, it
6112 requires that languages coordinate a symbol name. It is therefore
6113 best put here in common code. */
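/* A sketch of the generated form (not literal compiler output):

     GOMP_critical_start ();
     BODY;
     GOMP_critical_end ();
     GIMPLE_OMP_RETURN (nowait);

   or, for "#pragma omp critical (NAME)",

     GOMP_critical_name_start (&.gomp_critical_user_NAME);
     BODY;
     GOMP_critical_name_end (&.gomp_critical_user_NAME);
     GIMPLE_OMP_RETURN (nowait);

   where .gomp_critical_user_NAME is a common, public variable, so every
   translation unit that uses the same critical name shares one mutex.  */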
6114
6115 static GTY((param1_is (tree), param2_is (tree)))
6116 splay_tree critical_name_mutexes;
6117
6118 static void
6119 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6120 {
6121 tree block;
6122 tree name, lock, unlock;
6123 gimple stmt = gsi_stmt (*gsi_p), bind;
6124 location_t loc = gimple_location (stmt);
6125 gimple_seq tbody;
6126 struct gimplify_ctx gctx;
6127
6128 name = gimple_omp_critical_name (stmt);
6129 if (name)
6130 {
6131 tree decl;
6132 splay_tree_node n;
6133
6134 if (!critical_name_mutexes)
6135 critical_name_mutexes
6136 = splay_tree_new_ggc (splay_tree_compare_pointers,
6137 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
6138 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
6139
6140 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
6141 if (n == NULL)
6142 {
6143 char *new_str;
6144
6145 decl = create_tmp_var_raw (ptr_type_node, NULL);
6146
6147 new_str = ACONCAT ((".gomp_critical_user_",
6148 IDENTIFIER_POINTER (name), NULL));
6149 DECL_NAME (decl) = get_identifier (new_str);
6150 TREE_PUBLIC (decl) = 1;
6151 TREE_STATIC (decl) = 1;
6152 DECL_COMMON (decl) = 1;
6153 DECL_ARTIFICIAL (decl) = 1;
6154 DECL_IGNORED_P (decl) = 1;
6155 varpool_finalize_decl (decl);
6156
6157 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
6158 (splay_tree_value) decl);
6159 }
6160 else
6161 decl = (tree) n->value;
6162
6163 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
6164 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
6165
6166 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
6167 unlock = build_call_expr_loc (loc, unlock, 1,
6168 build_fold_addr_expr_loc (loc, decl));
6169 }
6170 else
6171 {
6172 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
6173 lock = build_call_expr_loc (loc, lock, 0);
6174
6175 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
6176 unlock = build_call_expr_loc (loc, unlock, 0);
6177 }
6178
6179 push_gimplify_context (&gctx);
6180
6181 block = make_node (BLOCK);
6182 bind = gimple_build_bind (NULL, NULL, block);
6183 gsi_replace (gsi_p, bind, true);
6184 gimple_bind_add_stmt (bind, stmt);
6185
6186 tbody = gimple_bind_body (bind);
6187 gimplify_and_add (lock, &tbody);
6188 gimple_bind_set_body (bind, tbody);
6189
6190 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6191 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6192 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6193 gimple_omp_set_body (stmt, NULL);
6194
6195 tbody = gimple_bind_body (bind);
6196 gimplify_and_add (unlock, &tbody);
6197 gimple_bind_set_body (bind, tbody);
6198
6199 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6200
6201 pop_gimplify_context (bind);
6202 gimple_bind_append_vars (bind, ctx->block_vars);
6203 BLOCK_VARS (block) = gimple_bind_vars (bind);
6204 }
6205
6206
6207 /* A subroutine of lower_omp_for. Generate code to emit the predicate
6208 for a lastprivate clause. Given a loop control predicate of (V
6209 cond N2), we gate the clause on (!(V cond N2)). The lowered form
6210 is appended to *DLIST, iterator initialization is appended to
6211 *BODY_P. */
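/* For example (a sketch, assuming a canonical loop with condition
   V < N2), the lowered lastprivate code in *DLIST ends up wrapped in

     if (V >= N2)
       <lastprivate copy-out>;

   so it only runs once the loop condition has finally failed.  */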
6212
6213 static void
6214 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6215 gimple_seq *dlist, struct omp_context *ctx)
6216 {
6217 tree clauses, cond, vinit;
6218 enum tree_code cond_code;
6219 gimple_seq stmts;
6220
6221 cond_code = fd->loop.cond_code;
6222 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6223
6224 /* When possible, use a strict equality expression. This can let VRP
6225 type optimizations deduce the value and remove a copy. */
6226 if (host_integerp (fd->loop.step, 0))
6227 {
6228 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6229 if (step == 1 || step == -1)
6230 cond_code = EQ_EXPR;
6231 }
6232
6233 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6234
6235 clauses = gimple_omp_for_clauses (fd->for_stmt);
6236 stmts = NULL;
6237 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6238 if (!gimple_seq_empty_p (stmts))
6239 {
6240 gimple_seq_add_seq (&stmts, *dlist);
6241 *dlist = stmts;
6242
6243 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6244 vinit = fd->loop.n1;
6245 if (cond_code == EQ_EXPR
6246 && host_integerp (fd->loop.n2, 0)
6247 && ! integer_zerop (fd->loop.n2))
6248 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6249
6250 /* Initialize the iterator variable, so that threads that don't execute
6251 any iterations don't execute the lastprivate clauses by accident. */
6252 gimplify_assign (fd->loop.v, vinit, body_p);
6253 }
6254 }
6255
6256
6257 /* Lower code for an OpenMP loop directive. */
6258
6259 static void
6260 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6261 {
6262 tree *rhs_p, block;
6263 struct omp_for_data fd;
6264 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6265 gimple_seq omp_for_body, body, dlist;
6266 size_t i;
6267 struct gimplify_ctx gctx;
6268
6269 push_gimplify_context (&gctx);
6270
6271 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
6272 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6273
6274 block = make_node (BLOCK);
6275 new_stmt = gimple_build_bind (NULL, NULL, block);
6276 /* Replace at gsi right away, so that 'stmt' is no longer a member
6277 of a sequence, as we're going to add it to a different
6278 one below. */
6279 gsi_replace (gsi_p, new_stmt, true);
6280
6281 /* Move declarations of temporaries in the loop body before we make
6282 the body go away. */
6283 omp_for_body = gimple_omp_body (stmt);
6284 if (!gimple_seq_empty_p (omp_for_body)
6285 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6286 {
6287 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6288 gimple_bind_append_vars (new_stmt, vars);
6289 }
6290
6291 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6292 dlist = NULL;
6293 body = NULL;
6294 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6295 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6296
6297 /* Lower the header expressions. At this point, we can assume that
6298 the header is of the form:
6299
6300 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6301
6302 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6303 using the .omp_data_s mapping, if needed. */
6304 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6305 {
6306 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6307 if (!is_gimple_min_invariant (*rhs_p))
6308 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6309
6310 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6311 if (!is_gimple_min_invariant (*rhs_p))
6312 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6313
6314 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6315 if (!is_gimple_min_invariant (*rhs_p))
6316 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6317 }
6318
6319 /* Once lowered, extract the bounds and clauses. */
6320 extract_omp_for_data (stmt, &fd, NULL);
6321
6322 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6323
6324 gimple_seq_add_stmt (&body, stmt);
6325 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6326
6327 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6328 fd.loop.v));
6329
6330 /* After the loop, add exit clauses. */
6331 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6332 gimple_seq_add_seq (&body, dlist);
6333
6334 body = maybe_catch_exception (body);
6335
6336 /* Region exit marker goes at the end of the loop body. */
6337 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6338
6339 pop_gimplify_context (new_stmt);
6340
6341 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6342 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6343 if (BLOCK_VARS (block))
6344 TREE_USED (block) = 1;
6345
6346 gimple_bind_set_body (new_stmt, body);
6347 gimple_omp_set_body (stmt, NULL);
6348 gimple_omp_for_set_pre_body (stmt, NULL);
6349 }
6350
6351 /* Callback for walk_stmts. Check if the current statement only contains
6352 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
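/* The result is communicated through WI->INFO, which points to an int:
   0 means no worksharing construct has been seen yet, 1 means exactly
   one GIMPLE_OMP_FOR/GIMPLE_OMP_SECTIONS was seen, and -1 means the
   body cannot be treated as a combined parallel+workshare region.  */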
6353
6354 static tree
6355 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6356 bool *handled_ops_p,
6357 struct walk_stmt_info *wi)
6358 {
6359 int *info = (int *) wi->info;
6360 gimple stmt = gsi_stmt (*gsi_p);
6361
6362 *handled_ops_p = true;
6363 switch (gimple_code (stmt))
6364 {
6365 WALK_SUBSTMTS;
6366
6367 case GIMPLE_OMP_FOR:
6368 case GIMPLE_OMP_SECTIONS:
6369 *info = *info == 0 ? 1 : -1;
6370 break;
6371 default:
6372 *info = -1;
6373 break;
6374 }
6375 return NULL;
6376 }
6377
6378 struct omp_taskcopy_context
6379 {
6380 /* This field must be at the beginning, as we do "inheritance": Some
6381 callback functions for tree-inline.c (e.g., omp_copy_decl)
6382 receive a copy_body_data pointer that is up-casted to an
6383 omp_context pointer. */
6384 copy_body_data cb;
6385 omp_context *ctx;
6386 };
6387
6388 static tree
6389 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6390 {
6391 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6392
6393 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6394 return create_tmp_var (TREE_TYPE (var), NULL);
6395
6396 return var;
6397 }
6398
6399 static tree
6400 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6401 {
6402 tree name, new_fields = NULL, type, f;
6403
6404 type = lang_hooks.types.make_type (RECORD_TYPE);
6405 name = DECL_NAME (TYPE_NAME (orig_type));
6406 name = build_decl (gimple_location (tcctx->ctx->stmt),
6407 TYPE_DECL, name, type);
6408 TYPE_NAME (type) = name;
6409
6410 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6411 {
6412 tree new_f = copy_node (f);
6413 DECL_CONTEXT (new_f) = type;
6414 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6415 TREE_CHAIN (new_f) = new_fields;
6416 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6417 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6418 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6419 &tcctx->cb, NULL);
6420 new_fields = new_f;
6421 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6422 }
6423 TYPE_FIELDS (type) = nreverse (new_fields);
6424 layout_type (type);
6425 return type;
6426 }
6427
6428 /* Create task copyfn. */
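/* A rough outline of the generated copy function (a sketch, not literal
   output): given DST, the record passed to the task body, and SRC, the
   record filled in by the encountering thread, it first initializes any
   size/offset temporaries, then copies shared variable pointers and
   copy-constructs non-VLA firstprivate variables, and finally
   copy-constructs VLA firstprivates and stores their addresses back
   into DST.  */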
6429
6430 static void
6431 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6432 {
6433 struct function *child_cfun;
6434 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6435 tree record_type, srecord_type, bind, list;
6436 bool record_needs_remap = false, srecord_needs_remap = false;
6437 splay_tree_node n;
6438 struct omp_taskcopy_context tcctx;
6439 struct gimplify_ctx gctx;
6440 location_t loc = gimple_location (task_stmt);
6441
6442 child_fn = gimple_omp_task_copy_fn (task_stmt);
6443 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6444 gcc_assert (child_cfun->cfg == NULL);
6445 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6446
6447 /* Reset DECL_CONTEXT on function arguments. */
6448 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
6449 DECL_CONTEXT (t) = child_fn;
6450
6451 /* Populate the function. */
6452 push_gimplify_context (&gctx);
6453 push_cfun (child_cfun);
6454
6455 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6456 TREE_SIDE_EFFECTS (bind) = 1;
6457 list = NULL;
6458 DECL_SAVED_TREE (child_fn) = bind;
6459 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6460
6461 /* Remap src and dst argument types if needed. */
6462 record_type = ctx->record_type;
6463 srecord_type = ctx->srecord_type;
6464 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6465 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6466 {
6467 record_needs_remap = true;
6468 break;
6469 }
6470 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
6471 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6472 {
6473 srecord_needs_remap = true;
6474 break;
6475 }
6476
6477 if (record_needs_remap || srecord_needs_remap)
6478 {
6479 memset (&tcctx, '\0', sizeof (tcctx));
6480 tcctx.cb.src_fn = ctx->cb.src_fn;
6481 tcctx.cb.dst_fn = child_fn;
6482 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
6483 gcc_checking_assert (tcctx.cb.src_node);
6484 tcctx.cb.dst_node = tcctx.cb.src_node;
6485 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6486 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6487 tcctx.cb.eh_lp_nr = 0;
6488 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6489 tcctx.cb.decl_map = pointer_map_create ();
6490 tcctx.ctx = ctx;
6491
6492 if (record_needs_remap)
6493 record_type = task_copyfn_remap_type (&tcctx, record_type);
6494 if (srecord_needs_remap)
6495 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6496 }
6497 else
6498 tcctx.cb.decl_map = NULL;
6499
6500 arg = DECL_ARGUMENTS (child_fn);
6501 TREE_TYPE (arg) = build_pointer_type (record_type);
6502 sarg = DECL_CHAIN (arg);
6503 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6504
6505 /* First pass: initialize temporaries used in record_type and srecord_type
6506 sizes and field offsets. */
6507 if (tcctx.cb.decl_map)
6508 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6509 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6510 {
6511 tree *p;
6512
6513 decl = OMP_CLAUSE_DECL (c);
6514 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6515 if (p == NULL)
6516 continue;
6517 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6518 sf = (tree) n->value;
6519 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6520 src = build_simple_mem_ref_loc (loc, sarg);
6521 src = omp_build_component_ref (src, sf);
6522 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6523 append_to_statement_list (t, &list);
6524 }
6525
6526 /* Second pass: copy shared var pointers and copy construct non-VLA
6527 firstprivate vars. */
6528 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6529 switch (OMP_CLAUSE_CODE (c))
6530 {
6531 case OMP_CLAUSE_SHARED:
6532 decl = OMP_CLAUSE_DECL (c);
6533 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6534 if (n == NULL)
6535 break;
6536 f = (tree) n->value;
6537 if (tcctx.cb.decl_map)
6538 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6539 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6540 sf = (tree) n->value;
6541 if (tcctx.cb.decl_map)
6542 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6543 src = build_simple_mem_ref_loc (loc, sarg);
6544 src = omp_build_component_ref (src, sf);
6545 dst = build_simple_mem_ref_loc (loc, arg);
6546 dst = omp_build_component_ref (dst, f);
6547 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6548 append_to_statement_list (t, &list);
6549 break;
6550 case OMP_CLAUSE_FIRSTPRIVATE:
6551 decl = OMP_CLAUSE_DECL (c);
6552 if (is_variable_sized (decl))
6553 break;
6554 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6555 if (n == NULL)
6556 break;
6557 f = (tree) n->value;
6558 if (tcctx.cb.decl_map)
6559 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6560 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6561 if (n != NULL)
6562 {
6563 sf = (tree) n->value;
6564 if (tcctx.cb.decl_map)
6565 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6566 src = build_simple_mem_ref_loc (loc, sarg);
6567 src = omp_build_component_ref (src, sf);
6568 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6569 src = build_simple_mem_ref_loc (loc, src);
6570 }
6571 else
6572 src = decl;
6573 dst = build_simple_mem_ref_loc (loc, arg);
6574 dst = omp_build_component_ref (dst, f);
6575 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6576 append_to_statement_list (t, &list);
6577 break;
6578 case OMP_CLAUSE_PRIVATE:
6579 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6580 break;
6581 decl = OMP_CLAUSE_DECL (c);
6582 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6583 f = (tree) n->value;
6584 if (tcctx.cb.decl_map)
6585 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6586 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6587 if (n != NULL)
6588 {
6589 sf = (tree) n->value;
6590 if (tcctx.cb.decl_map)
6591 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6592 src = build_simple_mem_ref_loc (loc, sarg);
6593 src = omp_build_component_ref (src, sf);
6594 if (use_pointer_for_field (decl, NULL))
6595 src = build_simple_mem_ref_loc (loc, src);
6596 }
6597 else
6598 src = decl;
6599 dst = build_simple_mem_ref_loc (loc, arg);
6600 dst = omp_build_component_ref (dst, f);
6601 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6602 append_to_statement_list (t, &list);
6603 break;
6604 default:
6605 break;
6606 }
6607
6608 /* Last pass: handle VLA firstprivates. */
6609 if (tcctx.cb.decl_map)
6610 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6611 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6612 {
6613 tree ind, ptr, df;
6614
6615 decl = OMP_CLAUSE_DECL (c);
6616 if (!is_variable_sized (decl))
6617 continue;
6618 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6619 if (n == NULL)
6620 continue;
6621 f = (tree) n->value;
6622 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6623 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6624 ind = DECL_VALUE_EXPR (decl);
6625 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6626 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6627 n = splay_tree_lookup (ctx->sfield_map,
6628 (splay_tree_key) TREE_OPERAND (ind, 0));
6629 sf = (tree) n->value;
6630 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6631 src = build_simple_mem_ref_loc (loc, sarg);
6632 src = omp_build_component_ref (src, sf);
6633 src = build_simple_mem_ref_loc (loc, src);
6634 dst = build_simple_mem_ref_loc (loc, arg);
6635 dst = omp_build_component_ref (dst, f);
6636 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6637 append_to_statement_list (t, &list);
6638 n = splay_tree_lookup (ctx->field_map,
6639 (splay_tree_key) TREE_OPERAND (ind, 0));
6640 df = (tree) n->value;
6641 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6642 ptr = build_simple_mem_ref_loc (loc, arg);
6643 ptr = omp_build_component_ref (ptr, df);
6644 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6645 build_fold_addr_expr_loc (loc, dst));
6646 append_to_statement_list (t, &list);
6647 }
6648
6649 t = build1 (RETURN_EXPR, void_type_node, NULL);
6650 append_to_statement_list (t, &list);
6651
6652 if (tcctx.cb.decl_map)
6653 pointer_map_destroy (tcctx.cb.decl_map);
6654 pop_gimplify_context (NULL);
6655 BIND_EXPR_BODY (bind) = list;
6656 pop_cfun ();
6657 }
6658
6659 /* Lower the OpenMP parallel or task directive in the current statement
6660 in GSI_P. CTX holds context information for the directive. */
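/* After lowering, the directive ends up wrapped roughly as follows
   (a sketch, not literal output):

     <send clauses / shared-variable setup, ILIST>
     GIMPLE_OMP_PARALLEL/TASK <clauses, .omp_data_o>
       receiver_decl = &.omp_data_o;
       <input clauses, lowered body, reductions>
       GIMPLE_OMP_RETURN
     <copy-back code, OLIST>  */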
6661
6662 static void
6663 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6664 {
6665 tree clauses;
6666 tree child_fn, t;
6667 gimple stmt = gsi_stmt (*gsi_p);
6668 gimple par_bind, bind;
6669 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6670 struct gimplify_ctx gctx;
6671 location_t loc = gimple_location (stmt);
6672
6673 clauses = gimple_omp_taskreg_clauses (stmt);
6674 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6675 par_body = gimple_bind_body (par_bind);
6676 child_fn = ctx->cb.dst_fn;
6677 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6678 && !gimple_omp_parallel_combined_p (stmt))
6679 {
6680 struct walk_stmt_info wi;
6681 int ws_num = 0;
6682
6683 memset (&wi, 0, sizeof (wi));
6684 wi.info = &ws_num;
6685 wi.val_only = true;
6686 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6687 if (ws_num == 1)
6688 gimple_omp_parallel_set_combined_p (stmt, true);
6689 }
6690 if (ctx->srecord_type)
6691 create_task_copyfn (stmt, ctx);
6692
6693 push_gimplify_context (&gctx);
6694
6695 par_olist = NULL;
6696 par_ilist = NULL;
6697 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6698 lower_omp (&par_body, ctx);
6699 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6700 lower_reduction_clauses (clauses, &par_olist, ctx);
6701
6702 /* Declare all the variables created by mapping and the variables
6703 declared in the scope of the parallel body. */
6704 record_vars_into (ctx->block_vars, child_fn);
6705 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6706
6707 if (ctx->record_type)
6708 {
6709 ctx->sender_decl
6710 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6711 : ctx->record_type, ".omp_data_o");
6712 DECL_NAMELESS (ctx->sender_decl) = 1;
6713 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6714 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6715 }
6716
6717 olist = NULL;
6718 ilist = NULL;
6719 lower_send_clauses (clauses, &ilist, &olist, ctx);
6720 lower_send_shared_vars (&ilist, &olist, ctx);
6721
6722 /* Once all the expansions are done, sequence all the different
6723 fragments inside gimple_omp_body. */
6724
6725 new_body = NULL;
6726
6727 if (ctx->record_type)
6728 {
6729 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6730 /* fixup_child_record_type might have changed receiver_decl's type. */
6731 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
6732 gimple_seq_add_stmt (&new_body,
6733 gimple_build_assign (ctx->receiver_decl, t));
6734 }
6735
6736 gimple_seq_add_seq (&new_body, par_ilist);
6737 gimple_seq_add_seq (&new_body, par_body);
6738 gimple_seq_add_seq (&new_body, par_olist);
6739 new_body = maybe_catch_exception (new_body);
6740 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6741 gimple_omp_set_body (stmt, new_body);
6742
6743 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6744 gsi_replace (gsi_p, bind, true);
6745 gimple_bind_add_seq (bind, ilist);
6746 gimple_bind_add_stmt (bind, stmt);
6747 gimple_bind_add_seq (bind, olist);
6748
6749 pop_gimplify_context (NULL);
6750 }
6751
6752 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6753 regimplified. If DATA is non-NULL, lower_omp_1 is being called outside
6754 of an OpenMP context, but with task_shared_vars set. */
6755
6756 static tree
6757 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6758 void *data)
6759 {
6760 tree t = *tp;
6761
6762 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6763 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6764 return t;
6765
6766 if (task_shared_vars
6767 && DECL_P (t)
6768 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6769 return t;
6770
6771 /* If a global variable has been privatized, TREE_CONSTANT on
6772 ADDR_EXPR might be wrong. */
6773 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6774 recompute_tree_invariant_for_addr_expr (t);
6775
6776 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6777 return NULL_TREE;
6778 }
6779
6780 static void
6781 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6782 {
6783 gimple stmt = gsi_stmt (*gsi_p);
6784 struct walk_stmt_info wi;
6785
6786 if (gimple_has_location (stmt))
6787 input_location = gimple_location (stmt);
6788
6789 if (task_shared_vars)
6790 memset (&wi, '\0', sizeof (wi));
6791
6792 /* If we have issued syntax errors, avoid doing any heavy lifting.
6793 Just replace the OpenMP directives with a NOP to avoid
6794 confusing RTL expansion. */
6795 if (seen_error () && is_gimple_omp (stmt))
6796 {
6797 gsi_replace (gsi_p, gimple_build_nop (), true);
6798 return;
6799 }
6800
6801 switch (gimple_code (stmt))
6802 {
6803 case GIMPLE_COND:
6804 if ((ctx || task_shared_vars)
6805 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6806 ctx ? NULL : &wi, NULL)
6807 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6808 ctx ? NULL : &wi, NULL)))
6809 gimple_regimplify_operands (stmt, gsi_p);
6810 break;
6811 case GIMPLE_CATCH:
6812 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
6813 break;
6814 case GIMPLE_EH_FILTER:
6815 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
6816 break;
6817 case GIMPLE_TRY:
6818 lower_omp (gimple_try_eval_ptr (stmt), ctx);
6819 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
6820 break;
6821 case GIMPLE_TRANSACTION:
6822 lower_omp (gimple_transaction_body_ptr (stmt), ctx);
6823 break;
6824 case GIMPLE_BIND:
6825 lower_omp (gimple_bind_body_ptr (stmt), ctx);
6826 break;
6827 case GIMPLE_OMP_PARALLEL:
6828 case GIMPLE_OMP_TASK:
6829 ctx = maybe_lookup_ctx (stmt);
6830 lower_omp_taskreg (gsi_p, ctx);
6831 break;
6832 case GIMPLE_OMP_FOR:
6833 ctx = maybe_lookup_ctx (stmt);
6834 gcc_assert (ctx);
6835 lower_omp_for (gsi_p, ctx);
6836 break;
6837 case GIMPLE_OMP_SECTIONS:
6838 ctx = maybe_lookup_ctx (stmt);
6839 gcc_assert (ctx);
6840 lower_omp_sections (gsi_p, ctx);
6841 break;
6842 case GIMPLE_OMP_SINGLE:
6843 ctx = maybe_lookup_ctx (stmt);
6844 gcc_assert (ctx);
6845 lower_omp_single (gsi_p, ctx);
6846 break;
6847 case GIMPLE_OMP_MASTER:
6848 ctx = maybe_lookup_ctx (stmt);
6849 gcc_assert (ctx);
6850 lower_omp_master (gsi_p, ctx);
6851 break;
6852 case GIMPLE_OMP_ORDERED:
6853 ctx = maybe_lookup_ctx (stmt);
6854 gcc_assert (ctx);
6855 lower_omp_ordered (gsi_p, ctx);
6856 break;
6857 case GIMPLE_OMP_CRITICAL:
6858 ctx = maybe_lookup_ctx (stmt);
6859 gcc_assert (ctx);
6860 lower_omp_critical (gsi_p, ctx);
6861 break;
6862 case GIMPLE_OMP_ATOMIC_LOAD:
6863 if ((ctx || task_shared_vars)
6864 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6865 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6866 gimple_regimplify_operands (stmt, gsi_p);
6867 break;
6868 default:
6869 if ((ctx || task_shared_vars)
6870 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6871 ctx ? NULL : &wi))
6872 gimple_regimplify_operands (stmt, gsi_p);
6873 break;
6874 }
6875 }
6876
6877 static void
6878 lower_omp (gimple_seq *body, omp_context *ctx)
6879 {
6880 location_t saved_location = input_location;
6881 gimple_stmt_iterator gsi;
6882 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
6883 lower_omp_1 (&gsi, ctx);
6884 input_location = saved_location;
6885 }
6886 \f
6887 /* Main entry point. */
6888
6889 static unsigned int
6890 execute_lower_omp (void)
6891 {
6892 gimple_seq body;
6893
6894 /* This pass always runs, to provide PROP_gimple_lomp.
6895 But there is nothing to do unless -fopenmp is given. */
6896 if (flag_openmp == 0)
6897 return 0;
6898
6899 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6900 delete_omp_context);
6901
6902 body = gimple_body (current_function_decl);
6903 scan_omp (&body, NULL);
6904 gcc_assert (taskreg_nesting_level == 0);
6905
6906 if (all_contexts->root)
6907 {
6908 struct gimplify_ctx gctx;
6909
6910 if (task_shared_vars)
6911 push_gimplify_context (&gctx);
6912 lower_omp (&body, NULL);
6913 if (task_shared_vars)
6914 pop_gimplify_context (NULL);
6915 }
6916
6917 if (all_contexts)
6918 {
6919 splay_tree_delete (all_contexts);
6920 all_contexts = NULL;
6921 }
6922 BITMAP_FREE (task_shared_vars);
6923 return 0;
6924 }
6925
6926 struct gimple_opt_pass pass_lower_omp =
6927 {
6928 {
6929 GIMPLE_PASS,
6930 "omplower", /* name */
6931 OPTGROUP_NONE, /* optinfo_flags */
6932 NULL, /* gate */
6933 execute_lower_omp, /* execute */
6934 NULL, /* sub */
6935 NULL, /* next */
6936 0, /* static_pass_number */
6937 TV_NONE, /* tv_id */
6938 PROP_gimple_any, /* properties_required */
6939 PROP_gimple_lomp, /* properties_provided */
6940 0, /* properties_destroyed */
6941 0, /* todo_flags_start */
6942 0 /* todo_flags_finish */
6943 }
6944 };
6945 \f
6946 /* The following is a utility to diagnose OpenMP structured block violations.
6947 It is not part of the "omplower" pass, as that's invoked too late. It
6948 should be invoked by the respective front ends after gimplification. */
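/* For example (a hypothetical input), branching into the body of an
   OpenMP construct from outside it is rejected:

     goto lab;
     #pragma omp parallel
     {
     lab: ;
     }

   which is reported below as an invalid entry to an OpenMP structured
   block.  */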
6949
6950 static splay_tree all_labels;
6951
6952 /* Check for mismatched contexts and generate an error if needed. Return
6953 true if an error is detected. */
6954
6955 static bool
6956 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6957 gimple branch_ctx, gimple label_ctx)
6958 {
6959 if (label_ctx == branch_ctx)
6960 return false;
6961
6962
6963 /*
6964 Previously we kept track of the label's entire context in diagnose_sb_[12]
6965 so we could traverse it and issue a correct "exit" or "enter" error
6966 message upon a structured block violation.
6967
6968 We built the context by building a list with tree_cons'ing, but there is
6969 no easy counterpart in gimple tuples. It seems like far too much work
6970 for issuing exit/enter error messages. If someone really misses the
6971 distinct error message... patches welcome.
6972 */
6973
6974 #if 0
6975 /* Try to avoid confusing the user by producing an error message
6976 with correct "exit" or "enter" verbiage. We prefer "exit"
6977 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6978 if (branch_ctx == NULL)
6979 exit_p = false;
6980 else
6981 {
6982 while (label_ctx)
6983 {
6984 if (TREE_VALUE (label_ctx) == branch_ctx)
6985 {
6986 exit_p = false;
6987 break;
6988 }
6989 label_ctx = TREE_CHAIN (label_ctx);
6990 }
6991 }
6992
6993 if (exit_p)
6994 error ("invalid exit from OpenMP structured block");
6995 else
6996 error ("invalid entry to OpenMP structured block");
6997 #endif
6998
6999 /* If it's obvious we have an invalid entry, be specific about the error. */
7000 if (branch_ctx == NULL)
7001 error ("invalid entry to OpenMP structured block");
7002 else
7003 /* Otherwise, be vague and lazy, but efficient. */
7004 error ("invalid branch to/from an OpenMP structured block");
7005
7006 gsi_replace (gsi_p, gimple_build_nop (), false);
7007 return true;
7008 }
7009
7010 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
7011 where each label is found. */
7012
7013 static tree
7014 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7015 struct walk_stmt_info *wi)
7016 {
7017 gimple context = (gimple) wi->info;
7018 gimple inner_context;
7019 gimple stmt = gsi_stmt (*gsi_p);
7020
7021 *handled_ops_p = true;
7022
7023 switch (gimple_code (stmt))
7024 {
7025 WALK_SUBSTMTS;
7026
7027 case GIMPLE_OMP_PARALLEL:
7028 case GIMPLE_OMP_TASK:
7029 case GIMPLE_OMP_SECTIONS:
7030 case GIMPLE_OMP_SINGLE:
7031 case GIMPLE_OMP_SECTION:
7032 case GIMPLE_OMP_MASTER:
7033 case GIMPLE_OMP_ORDERED:
7034 case GIMPLE_OMP_CRITICAL:
7035 /* The minimal context here is just the current OMP construct. */
7036 inner_context = stmt;
7037 wi->info = inner_context;
7038 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7039 wi->info = context;
7040 break;
7041
7042 case GIMPLE_OMP_FOR:
7043 inner_context = stmt;
7044 wi->info = inner_context;
7045 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7046 walk them. */
7047 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
7048 diagnose_sb_1, NULL, wi);
7049 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7050 wi->info = context;
7051 break;
7052
7053 case GIMPLE_LABEL:
7054 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
7055 (splay_tree_value) context);
7056 break;
7057
7058 default:
7059 break;
7060 }
7061
7062 return NULL_TREE;
7063 }
7064
7065 /* Pass 2: Check each branch and see if its context differs from that of
7066 the destination label's context. */
7067
7068 static tree
7069 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7070 struct walk_stmt_info *wi)
7071 {
7072 gimple context = (gimple) wi->info;
7073 splay_tree_node n;
7074 gimple stmt = gsi_stmt (*gsi_p);
7075
7076 *handled_ops_p = true;
7077
7078 switch (gimple_code (stmt))
7079 {
7080 WALK_SUBSTMTS;
7081
7082 case GIMPLE_OMP_PARALLEL:
7083 case GIMPLE_OMP_TASK:
7084 case GIMPLE_OMP_SECTIONS:
7085 case GIMPLE_OMP_SINGLE:
7086 case GIMPLE_OMP_SECTION:
7087 case GIMPLE_OMP_MASTER:
7088 case GIMPLE_OMP_ORDERED:
7089 case GIMPLE_OMP_CRITICAL:
7090 wi->info = stmt;
7091 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
7092 wi->info = context;
7093 break;
7094
7095 case GIMPLE_OMP_FOR:
7096 wi->info = stmt;
7097 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7098 walk them. */
7099 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
7100 diagnose_sb_2, NULL, wi);
7101 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
7102 wi->info = context;
7103 break;
7104
7105 case GIMPLE_COND:
7106 {
7107 tree lab = gimple_cond_true_label (stmt);
7108 if (lab)
7109 {
7110 n = splay_tree_lookup (all_labels,
7111 (splay_tree_key) lab);
7112 diagnose_sb_0 (gsi_p, context,
7113 n ? (gimple) n->value : NULL);
7114 }
7115 lab = gimple_cond_false_label (stmt);
7116 if (lab)
7117 {
7118 n = splay_tree_lookup (all_labels,
7119 (splay_tree_key) lab);
7120 diagnose_sb_0 (gsi_p, context,
7121 n ? (gimple) n->value : NULL);
7122 }
7123 }
7124 break;
7125
7126 case GIMPLE_GOTO:
7127 {
7128 tree lab = gimple_goto_dest (stmt);
7129 if (TREE_CODE (lab) != LABEL_DECL)
7130 break;
7131
7132 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7133 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
7134 }
7135 break;
7136
7137 case GIMPLE_SWITCH:
7138 {
7139 unsigned int i;
7140 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
7141 {
7142 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
7143 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7144 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
7145 break;
7146 }
7147 }
7148 break;
7149
7150 case GIMPLE_RETURN:
7151 diagnose_sb_0 (gsi_p, context, NULL);
7152 break;
7153
7154 default:
7155 break;
7156 }
7157
7158 return NULL_TREE;
7159 }
7160
7161 static unsigned int
7162 diagnose_omp_structured_block_errors (void)
7163 {
7164 struct walk_stmt_info wi;
7165 gimple_seq body = gimple_body (current_function_decl);
7166
7167 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
7168
7169 memset (&wi, 0, sizeof (wi));
7170 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
7171
7172 memset (&wi, 0, sizeof (wi));
7173 wi.want_locations = true;
7174 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
7175
7176 gimple_set_body (current_function_decl, body);
7177
7178 splay_tree_delete (all_labels);
7179 all_labels = NULL;
7180
7181 return 0;
7182 }
7183
7184 static bool
7185 gate_diagnose_omp_blocks (void)
7186 {
7187 return flag_openmp != 0;
7188 }
7189
7190 struct gimple_opt_pass pass_diagnose_omp_blocks =
7191 {
7192 {
7193 GIMPLE_PASS,
7194 "*diagnose_omp_blocks", /* name */
7195 OPTGROUP_NONE, /* optinfo_flags */
7196 gate_diagnose_omp_blocks, /* gate */
7197 diagnose_omp_structured_block_errors, /* execute */
7198 NULL, /* sub */
7199 NULL, /* next */
7200 0, /* static_pass_number */
7201 TV_NONE, /* tv_id */
7202 PROP_gimple_any, /* properties_required */
7203 0, /* properties_provided */
7204 0, /* properties_destroyed */
7205 0, /* todo_flags_start */
7206 0, /* todo_flags_finish */
7207 }
7208 };
7209
7210 #include "gt-omp-low.h"