re PR middle-end/56883 (error openmp parallel for order)
gcc/omp-low.c
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005-2013 Free Software Foundation, Inc.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "gimple.h"
31 #include "tree-iterator.h"
32 #include "tree-inline.h"
33 #include "langhooks.h"
34 #include "diagnostic-core.h"
35 #include "tree-flow.h"
36 #include "flags.h"
37 #include "function.h"
38 #include "expr.h"
39 #include "tree-pass.h"
40 #include "ggc.h"
41 #include "except.h"
42 #include "splay-tree.h"
43 #include "optabs.h"
44 #include "cfgloop.h"
45
46
47 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
48 phases. The first phase scans the function looking for OMP statements
49 and then for variables that must be replaced to satisfy data sharing
50 clauses. The second phase expands code for the constructs, as well as
51 re-gimplifying things when variables have been replaced with complex
52 expressions.
53
54 Final code generation is done by pass_expand_omp. The flowgraph is
55 scanned for parallel regions which are then moved to a new
56 function, to be invoked by the thread library. */
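
/* For illustration (a sketch, not the exact GIMPLE this pass emits), a
   construct such as

	#pragma omp parallel shared (n)
	  body;

   is outlined into a child function foo._omp_fn.0 taking a pointer to a
   generated struct .omp_data_s, and the directive itself becomes roughly

	.omp_data_o.n = n;
	GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
	foo._omp_fn.0 (&.omp_data_o);
	GOMP_parallel_end ();  */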
57
58 /* Context structure. Used to store information about each parallel
59 directive in the code. */
60
61 typedef struct omp_context
62 {
63 /* This field must be at the beginning, as we do "inheritance": Some
64 callback functions for tree-inline.c (e.g., omp_copy_decl)
65 receive a copy_body_data pointer that is up-casted to an
66 omp_context pointer. */
67 copy_body_data cb;
68
69 /* The tree of contexts corresponding to the encountered constructs. */
70 struct omp_context *outer;
71 gimple stmt;
72
73 /* Map variables to fields in a structure that allows communication
74 between sending and receiving threads. */
75 splay_tree field_map;
76 tree record_type;
77 tree sender_decl;
78 tree receiver_decl;
79
80 /* These are used just by task contexts, if task firstprivate fn is
81 needed. srecord_type is used to communicate from the thread
82 that encountered the task construct to the task firstprivate fn;
83 record_type is allocated by GOMP_task, initialized by task firstprivate
84 fn and passed to the task body fn. */
85 splay_tree sfield_map;
86 tree srecord_type;
87
88 /* A chain of variables to add to the top-level block surrounding the
89 construct. In the case of a parallel, this is in the child function. */
90 tree block_vars;
91
92 /* What to do with variables with implicitly determined sharing
93 attributes. */
94 enum omp_clause_default_kind default_kind;
95
96 /* Nesting depth of this context. Used to beautify error messages about
97 invalid gotos. The outermost ctx is depth 1, with depth 0 being
98 reserved for the main body of the function. */
99 int depth;
100
101 /* True if this parallel directive is nested within another. */
102 bool is_nested;
103 } omp_context;
104
105
106 struct omp_for_data_loop
107 {
108 tree v, n1, n2, step;
109 enum tree_code cond_code;
110 };
111
112 /* A structure describing the main elements of a parallel loop. */
113
114 struct omp_for_data
115 {
116 struct omp_for_data_loop loop;
117 tree chunk_size;
118 gimple for_stmt;
119 tree pre, iter_type;
120 int collapse;
121 bool have_nowait, have_ordered;
122 enum omp_clause_schedule_kind sched_kind;
123 struct omp_for_data_loop *loops;
124 };
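
/* For example (a sketch; the values follow extract_omp_for_data below), the
   non-collapsed loop

	#pragma omp for
	for (i = 0; i < n; i += 4)

   is described by loop.v == i, loop.n1 == 0, loop.n2 == n, loop.step == 4,
   loop.cond_code == LT_EXPR, collapse == 1, sched_kind defaulting to
   OMP_CLAUSE_SCHEDULE_STATIC and chunk_size == NULL_TREE.  */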
125
126
127 static splay_tree all_contexts;
128 static int taskreg_nesting_level;
129 struct omp_region *root_omp_region;
130 static bitmap task_shared_vars;
131
132 static void scan_omp (gimple_seq *, omp_context *);
133 static tree scan_omp_1_op (tree *, int *, void *);
134
135 #define WALK_SUBSTMTS \
136 case GIMPLE_BIND: \
137 case GIMPLE_TRY: \
138 case GIMPLE_CATCH: \
139 case GIMPLE_EH_FILTER: \
140 case GIMPLE_TRANSACTION: \
141 /* The sub-statements for these should be walked. */ \
142 *handled_ops_p = false; \
143 break;
144
145 /* Convenience function for calling scan_omp_1_op on tree operands. */
146
147 static inline tree
148 scan_omp_op (tree *tp, omp_context *ctx)
149 {
150 struct walk_stmt_info wi;
151
152 memset (&wi, 0, sizeof (wi));
153 wi.info = ctx;
154 wi.want_locations = true;
155
156 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
157 }
158
159 static void lower_omp (gimple_seq *, omp_context *);
160 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
161 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
162
163 /* Find an OpenMP clause of type KIND within CLAUSES. */
164
165 tree
166 find_omp_clause (tree clauses, enum omp_clause_code kind)
167 {
168 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
169 if (OMP_CLAUSE_CODE (clauses) == kind)
170 return clauses;
171
172 return NULL_TREE;
173 }
174
175 /* Return true if CTX is for an omp parallel. */
176
177 static inline bool
178 is_parallel_ctx (omp_context *ctx)
179 {
180 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
181 }
182
183
184 /* Return true if CTX is for an omp task. */
185
186 static inline bool
187 is_task_ctx (omp_context *ctx)
188 {
189 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
190 }
191
192
193 /* Return true if CTX is for an omp parallel or omp task. */
194
195 static inline bool
196 is_taskreg_ctx (omp_context *ctx)
197 {
198 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
199 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
200 }
201
202
203 /* Return true if REGION is a combined parallel+workshare region. */
204
205 static inline bool
206 is_combined_parallel (struct omp_region *region)
207 {
208 return region->is_combined_parallel;
209 }
210
211
212 /* Extract the header elements of parallel loop FOR_STMT and store
213 them into *FD. */
214
215 static void
216 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
217 struct omp_for_data_loop *loops)
218 {
219 tree t, var, *collapse_iter, *collapse_count;
220 tree count = NULL_TREE, iter_type = long_integer_type_node;
221 struct omp_for_data_loop *loop;
222 int i;
223 struct omp_for_data_loop dummy_loop;
224 location_t loc = gimple_location (for_stmt);
225
226 fd->for_stmt = for_stmt;
227 fd->pre = NULL;
228 fd->collapse = gimple_omp_for_collapse (for_stmt);
229 if (fd->collapse > 1)
230 fd->loops = loops;
231 else
232 fd->loops = &fd->loop;
233
234 fd->have_nowait = fd->have_ordered = false;
235 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
236 fd->chunk_size = NULL_TREE;
237 collapse_iter = NULL;
238 collapse_count = NULL;
239
240 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
241 switch (OMP_CLAUSE_CODE (t))
242 {
243 case OMP_CLAUSE_NOWAIT:
244 fd->have_nowait = true;
245 break;
246 case OMP_CLAUSE_ORDERED:
247 fd->have_ordered = true;
248 break;
249 case OMP_CLAUSE_SCHEDULE:
250 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
251 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
252 break;
253 case OMP_CLAUSE_COLLAPSE:
254 if (fd->collapse > 1)
255 {
256 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
257 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
258 }
259 default:
260 break;
261 }
262
263 /* FIXME: for now map schedule(auto) to schedule(static).
264 There should be analysis to determine whether all iterations
265 are approximately the same amount of work (then schedule(static)
266 is best) or if it varies (then schedule(dynamic,N) is better). */
267 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
268 {
269 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
270 gcc_assert (fd->chunk_size == NULL);
271 }
272 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
273 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
274 gcc_assert (fd->chunk_size == NULL);
275 else if (fd->chunk_size == NULL)
276 {
277 /* We only need to compute a default chunk size for ordered
278 static loops and dynamic loops. */
279 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
280 || fd->have_ordered
281 || fd->collapse > 1)
282 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
283 ? integer_zero_node : integer_one_node;
284 }
285
286 for (i = 0; i < fd->collapse; i++)
287 {
288 if (fd->collapse == 1)
289 loop = &fd->loop;
290 else if (loops != NULL)
291 loop = loops + i;
292 else
293 loop = &dummy_loop;
294
295
296 loop->v = gimple_omp_for_index (for_stmt, i);
297 gcc_assert (SSA_VAR_P (loop->v));
298 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
299 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
300 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
301 loop->n1 = gimple_omp_for_initial (for_stmt, i);
302
303 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
304 loop->n2 = gimple_omp_for_final (for_stmt, i);
305 switch (loop->cond_code)
306 {
307 case LT_EXPR:
308 case GT_EXPR:
309 break;
310 case LE_EXPR:
311 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
312 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
313 else
314 loop->n2 = fold_build2_loc (loc,
315 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
316 build_int_cst (TREE_TYPE (loop->n2), 1));
317 loop->cond_code = LT_EXPR;
318 break;
319 case GE_EXPR:
320 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
321 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
322 else
323 loop->n2 = fold_build2_loc (loc,
324 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
325 build_int_cst (TREE_TYPE (loop->n2), 1));
326 loop->cond_code = GT_EXPR;
327 break;
328 default:
329 gcc_unreachable ();
330 }
331
332 t = gimple_omp_for_incr (for_stmt, i);
333 gcc_assert (TREE_OPERAND (t, 0) == var);
334 switch (TREE_CODE (t))
335 {
336 case PLUS_EXPR:
337 loop->step = TREE_OPERAND (t, 1);
338 break;
339 case POINTER_PLUS_EXPR:
340 loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
341 break;
342 case MINUS_EXPR:
343 loop->step = TREE_OPERAND (t, 1);
344 loop->step = fold_build1_loc (loc,
345 NEGATE_EXPR, TREE_TYPE (loop->step),
346 loop->step);
347 break;
348 default:
349 gcc_unreachable ();
350 }
351
352 if (iter_type != long_long_unsigned_type_node)
353 {
354 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
355 iter_type = long_long_unsigned_type_node;
356 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
357 && TYPE_PRECISION (TREE_TYPE (loop->v))
358 >= TYPE_PRECISION (iter_type))
359 {
360 tree n;
361
362 if (loop->cond_code == LT_EXPR)
363 n = fold_build2_loc (loc,
364 PLUS_EXPR, TREE_TYPE (loop->v),
365 loop->n2, loop->step);
366 else
367 n = loop->n1;
368 if (TREE_CODE (n) != INTEGER_CST
369 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
370 iter_type = long_long_unsigned_type_node;
371 }
372 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
373 > TYPE_PRECISION (iter_type))
374 {
375 tree n1, n2;
376
377 if (loop->cond_code == LT_EXPR)
378 {
379 n1 = loop->n1;
380 n2 = fold_build2_loc (loc,
381 PLUS_EXPR, TREE_TYPE (loop->v),
382 loop->n2, loop->step);
383 }
384 else
385 {
386 n1 = fold_build2_loc (loc,
387 MINUS_EXPR, TREE_TYPE (loop->v),
388 loop->n2, loop->step);
389 n2 = loop->n1;
390 }
391 if (TREE_CODE (n1) != INTEGER_CST
392 || TREE_CODE (n2) != INTEGER_CST
393 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
394 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
395 iter_type = long_long_unsigned_type_node;
396 }
397 }
398
399 if (collapse_count && *collapse_count == NULL)
400 {
401 if ((i == 0 || count != NULL_TREE)
402 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
403 && TREE_CONSTANT (loop->n1)
404 && TREE_CONSTANT (loop->n2)
405 && TREE_CODE (loop->step) == INTEGER_CST)
406 {
407 tree itype = TREE_TYPE (loop->v);
408
409 if (POINTER_TYPE_P (itype))
410 itype = signed_type_for (itype);
411 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
412 t = fold_build2_loc (loc,
413 PLUS_EXPR, itype,
414 fold_convert_loc (loc, itype, loop->step), t);
415 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
416 fold_convert_loc (loc, itype, loop->n2));
417 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
418 fold_convert_loc (loc, itype, loop->n1));
419 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
420 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
421 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
422 fold_build1_loc (loc, NEGATE_EXPR, itype,
423 fold_convert_loc (loc, itype,
424 loop->step)));
425 else
426 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
427 fold_convert_loc (loc, itype, loop->step));
428 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
429 if (count != NULL_TREE)
430 count = fold_build2_loc (loc,
431 MULT_EXPR, long_long_unsigned_type_node,
432 count, t);
433 else
434 count = t;
435 if (TREE_CODE (count) != INTEGER_CST)
436 count = NULL_TREE;
437 }
438 else
439 count = NULL_TREE;
440 }
441 }
442
443 if (count)
444 {
445 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
446 iter_type = long_long_unsigned_type_node;
447 else
448 iter_type = long_integer_type_node;
449 }
450 else if (collapse_iter && *collapse_iter != NULL)
451 iter_type = TREE_TYPE (*collapse_iter);
452 fd->iter_type = iter_type;
453 if (collapse_iter && *collapse_iter == NULL)
454 *collapse_iter = create_tmp_var (iter_type, ".iter");
455 if (collapse_count && *collapse_count == NULL)
456 {
457 if (count)
458 *collapse_count = fold_convert_loc (loc, iter_type, count);
459 else
460 *collapse_count = create_tmp_var (iter_type, ".count");
461 }
462
463 if (fd->collapse > 1)
464 {
465 fd->loop.v = *collapse_iter;
466 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
467 fd->loop.n2 = *collapse_count;
468 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
469 fd->loop.cond_code = LT_EXPR;
470 }
471 }
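
/* A worked example of the above (a sketch, not actual compiler output):
   for the collapsed nest

	#pragma omp for collapse (2)
	for (i = 0; i <= 9; i++)
	  for (j = 0; j < 8; j += 3)

   the LE_EXPR bound is first normalized to i < 10 with cond_code LT_EXPR.
   Each loop's trip count is then (n2 - n1 + step - 1) / step, i.e. 10 for
   the outer loop and (8 + 3 - 1) / 3 == 3 for the inner one, so the
   collapsed iteration space gets fd->loop.n1 == 0, fd->loop.n2 == 30,
   fd->loop.step == 1 and fd->loop.cond_code == LT_EXPR.  */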
472
473
474 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
475 is the immediate dominator of PAR_ENTRY_BB, return true if there
476 are no data dependencies that would prevent expanding the parallel
477 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
478
479 When expanding a combined parallel+workshare region, the call to
480 the child function may need additional arguments in the case of
481 GIMPLE_OMP_FOR regions. In some cases, these arguments are
482 computed out of variables passed in from the parent to the child
483 via 'struct .omp_data_s'. For instance:
484
485 #pragma omp parallel for schedule (guided, i * 4)
486 for (j ...)
487
488 Is lowered into:
489
490 # BLOCK 2 (PAR_ENTRY_BB)
491 .omp_data_o.i = i;
492 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
493
494 # BLOCK 3 (WS_ENTRY_BB)
495 .omp_data_i = &.omp_data_o;
496 D.1667 = .omp_data_i->i;
497 D.1598 = D.1667 * 4;
498 #pragma omp for schedule (guided, D.1598)
499
500 When we outline the parallel region, the call to the child function
501 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
502 that value is computed *after* the call site. So, in principle we
503 cannot do the transformation.
504
505 To see whether the code in WS_ENTRY_BB blocks the combined
506 parallel+workshare call, we collect all the variables used in the
507 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
508 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
509 call.
510
511 FIXME. If we had the SSA form built at this point, we could merely
512 hoist the code in block 3 into block 2 and be done with it. But at
513 this point we don't have dataflow information and though we could
514 hack something up here, it is really not worth the aggravation. */
515
516 static bool
517 workshare_safe_to_combine_p (basic_block ws_entry_bb)
518 {
519 struct omp_for_data fd;
520 gimple ws_stmt = last_stmt (ws_entry_bb);
521
522 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
523 return true;
524
525 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
526
527 extract_omp_for_data (ws_stmt, &fd, NULL);
528
529 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
530 return false;
531 if (fd.iter_type != long_integer_type_node)
532 return false;
533
534 /* FIXME. We give up too easily here. If any of these arguments
535 are not constants, they will likely involve variables that have
536 been mapped into fields of .omp_data_s for sharing with the child
537 function. With appropriate data flow, it would be possible to
538 see through this. */
539 if (!is_gimple_min_invariant (fd.loop.n1)
540 || !is_gimple_min_invariant (fd.loop.n2)
541 || !is_gimple_min_invariant (fd.loop.step)
542 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
543 return false;
544
545 return true;
546 }
547
548
549 /* Collect additional arguments needed to emit a combined
550 parallel+workshare call. WS_STMT is the workshare directive being
551 expanded. */
552
553 static vec<tree, va_gc> *
554 get_ws_args_for (gimple ws_stmt)
555 {
556 tree t;
557 location_t loc = gimple_location (ws_stmt);
558 vec<tree, va_gc> *ws_args;
559
560 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
561 {
562 struct omp_for_data fd;
563
564 extract_omp_for_data (ws_stmt, &fd, NULL);
565
566 vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
567
568 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
569 ws_args->quick_push (t);
570
571 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
572 ws_args->quick_push (t);
573
574 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
575 ws_args->quick_push (t);
576
577 if (fd.chunk_size)
578 {
579 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
580 ws_args->quick_push (t);
581 }
582
583 return ws_args;
584 }
585 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
586 {
587 /* Number of sections is equal to the number of edges from the
588 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
589 the exit of the sections region. */
590 basic_block bb = single_succ (gimple_bb (ws_stmt));
591 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
592 vec_alloc (ws_args, 1);
593 ws_args->quick_push (t);
594 return ws_args;
595 }
596
597 gcc_unreachable ();
598 }
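
/* For instance (a sketch; the exact libgomp entry point depends on the
   schedule kind), for "#pragma omp parallel for schedule (dynamic, 4)" the
   vector built above holds the long-converted N1, N2 and STEP plus the
   chunk size 4, ready to be appended to the arguments of the combined
   GOMP_parallel_loop_dynamic_start call emitted during expansion.  */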
599
600
601 /* Discover whether REGION is a combined parallel+workshare region. */
602
603 static void
604 determine_parallel_type (struct omp_region *region)
605 {
606 basic_block par_entry_bb, par_exit_bb;
607 basic_block ws_entry_bb, ws_exit_bb;
608
609 if (region == NULL || region->inner == NULL
610 || region->exit == NULL || region->inner->exit == NULL
611 || region->inner->cont == NULL)
612 return;
613
614 /* We only support parallel+for and parallel+sections. */
615 if (region->type != GIMPLE_OMP_PARALLEL
616 || (region->inner->type != GIMPLE_OMP_FOR
617 && region->inner->type != GIMPLE_OMP_SECTIONS))
618 return;
619
620 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
621 WS_EXIT_BB -> PAR_EXIT_BB. */
622 par_entry_bb = region->entry;
623 par_exit_bb = region->exit;
624 ws_entry_bb = region->inner->entry;
625 ws_exit_bb = region->inner->exit;
626
627 if (single_succ (par_entry_bb) == ws_entry_bb
628 && single_succ (ws_exit_bb) == par_exit_bb
629 && workshare_safe_to_combine_p (ws_entry_bb)
630 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
631 || (last_and_only_stmt (ws_entry_bb)
632 && last_and_only_stmt (par_exit_bb))))
633 {
634 gimple ws_stmt = last_stmt (ws_entry_bb);
635
636 if (region->inner->type == GIMPLE_OMP_FOR)
637 {
638 /* If this is a combined parallel loop, we need to determine
639 whether or not to use the combined library calls. There
640 are two cases where we do not apply the transformation:
641 static loops and any kind of ordered loop. In the first
642 case, we already open code the loop so there is no need
643 to do anything else. In the latter case, the combined
644 parallel loop call would still need extra synchronization
645 to implement ordered semantics, so there would not be any
646 gain in using the combined call. */
647 tree clauses = gimple_omp_for_clauses (ws_stmt);
648 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
649 if (c == NULL
650 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
651 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
652 {
653 region->is_combined_parallel = false;
654 region->inner->is_combined_parallel = false;
655 return;
656 }
657 }
658
659 region->is_combined_parallel = true;
660 region->inner->is_combined_parallel = true;
661 region->ws_args = get_ws_args_for (ws_stmt);
662 }
663 }
664
665
666 /* Return true if EXPR is variable sized. */
667
668 static inline bool
669 is_variable_sized (const_tree expr)
670 {
671 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
672 }
673
674 /* Return true if DECL is of a reference type. */
675
676 static inline bool
677 is_reference (tree decl)
678 {
679 return lang_hooks.decls.omp_privatize_by_reference (decl);
680 }
681
682 /* Lookup variables in the decl or field splay trees. The "maybe" form
683 allows for the variable not to have been entered; otherwise we
684 assert that it must have been entered. */
685
686 static inline tree
687 lookup_decl (tree var, omp_context *ctx)
688 {
689 tree *n;
690 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
691 return *n;
692 }
693
694 static inline tree
695 maybe_lookup_decl (const_tree var, omp_context *ctx)
696 {
697 tree *n;
698 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
699 return n ? *n : NULL_TREE;
700 }
701
702 static inline tree
703 lookup_field (tree var, omp_context *ctx)
704 {
705 splay_tree_node n;
706 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
707 return (tree) n->value;
708 }
709
710 static inline tree
711 lookup_sfield (tree var, omp_context *ctx)
712 {
713 splay_tree_node n;
714 n = splay_tree_lookup (ctx->sfield_map
715 ? ctx->sfield_map : ctx->field_map,
716 (splay_tree_key) var);
717 return (tree) n->value;
718 }
719
720 static inline tree
721 maybe_lookup_field (tree var, omp_context *ctx)
722 {
723 splay_tree_node n;
724 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
725 return n ? (tree) n->value : NULL_TREE;
726 }
727
728 /* Return true if DECL should be copied by pointer. SHARED_CTX is
729 the parallel context if DECL is to be shared. */
730
731 static bool
732 use_pointer_for_field (tree decl, omp_context *shared_ctx)
733 {
734 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
735 return true;
736
737 /* We can only use copy-in/copy-out semantics for shared variables
738 when we know the value is not accessible from an outer scope. */
739 if (shared_ctx)
740 {
741 /* ??? Trivially accessible from anywhere. But why would we even
742 be passing an address in this case? Should we simply assert
743 this to be false, or should we have a cleanup pass that removes
744 these from the list of mappings? */
745 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
746 return true;
747
748 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
749 without analyzing the expression whether or not its location
750 is accessible to anyone else. In the case of nested parallel
751 regions it certainly may be. */
752 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
753 return true;
754
755 /* Do not use copy-in/copy-out for variables that have their
756 address taken. */
757 if (TREE_ADDRESSABLE (decl))
758 return true;
759
760 /* lower_send_shared_vars only uses copy-in, but not copy-out
761 for these. */
762 if (TREE_READONLY (decl)
763 || ((TREE_CODE (decl) == RESULT_DECL
764 || TREE_CODE (decl) == PARM_DECL)
765 && DECL_BY_REFERENCE (decl)))
766 return false;
767
768 /* Disallow copy-in/out in nested parallel if
769 decl is shared in outer parallel, otherwise
770 each thread could store the shared variable
771 in its own copy-in location, making the
772 variable no longer really shared. */
773 if (shared_ctx->is_nested)
774 {
775 omp_context *up;
776
777 for (up = shared_ctx->outer; up; up = up->outer)
778 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
779 break;
780
781 if (up)
782 {
783 tree c;
784
785 for (c = gimple_omp_taskreg_clauses (up->stmt);
786 c; c = OMP_CLAUSE_CHAIN (c))
787 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
788 && OMP_CLAUSE_DECL (c) == decl)
789 break;
790
791 if (c)
792 goto maybe_mark_addressable_and_ret;
793 }
794 }
795
796 /* For tasks avoid using copy-in/out. As tasks can be
797 deferred or executed in different thread, when GOMP_task
798 returns, the task hasn't necessarily terminated. */
799 if (is_task_ctx (shared_ctx))
800 {
801 tree outer;
802 maybe_mark_addressable_and_ret:
803 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
804 if (is_gimple_reg (outer))
805 {
806 /* Taking address of OUTER in lower_send_shared_vars
807 might need regimplification of everything that uses the
808 variable. */
809 if (!task_shared_vars)
810 task_shared_vars = BITMAP_ALLOC (NULL);
811 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
812 TREE_ADDRESSABLE (outer) = 1;
813 }
814 return true;
815 }
816 }
817
818 return false;
819 }
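
/* Illustrative cases for the above (a sketch): a plain "int n" in a shared
   clause on a parallel gets a field of type int and uses copy-in/copy-out,
   whereas an array "int a[10]", a variable whose address has been taken, or
   any variable shared into a task context is passed by reference, i.e. its
   field holds a pointer.  */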
820
821 /* Create a new VAR_DECL and copy information from VAR to it. */
822
823 tree
824 copy_var_decl (tree var, tree name, tree type)
825 {
826 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
827
828 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
829 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
830 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
831 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
832 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
833 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
834 TREE_USED (copy) = 1;
835 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
836
837 return copy;
838 }
839
840 /* Construct a new automatic decl similar to VAR. */
841
842 static tree
843 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
844 {
845 tree copy = copy_var_decl (var, name, type);
846
847 DECL_CONTEXT (copy) = current_function_decl;
848 DECL_CHAIN (copy) = ctx->block_vars;
849 ctx->block_vars = copy;
850
851 return copy;
852 }
853
854 static tree
855 omp_copy_decl_1 (tree var, omp_context *ctx)
856 {
857 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
858 }
859
860 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
861 as appropriate. */
862 static tree
863 omp_build_component_ref (tree obj, tree field)
864 {
865 tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
866 if (TREE_THIS_VOLATILE (field))
867 TREE_THIS_VOLATILE (ret) |= 1;
868 if (TREE_READONLY (field))
869 TREE_READONLY (ret) |= 1;
870 return ret;
871 }
872
873 /* Build tree nodes to access the field for VAR on the receiver side. */
874
875 static tree
876 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
877 {
878 tree x, field = lookup_field (var, ctx);
879
880 /* If the receiver record type was remapped in the child function,
881 remap the field into the new record type. */
882 x = maybe_lookup_field (field, ctx);
883 if (x != NULL)
884 field = x;
885
886 x = build_simple_mem_ref (ctx->receiver_decl);
887 x = omp_build_component_ref (x, field);
888 if (by_ref)
889 x = build_simple_mem_ref (x);
890
891 return x;
892 }
893
894 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
895 of a parallel, this is a component reference; for workshare constructs
896 this is some variable. */
897
898 static tree
899 build_outer_var_ref (tree var, omp_context *ctx)
900 {
901 tree x;
902
903 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
904 x = var;
905 else if (is_variable_sized (var))
906 {
907 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
908 x = build_outer_var_ref (x, ctx);
909 x = build_simple_mem_ref (x);
910 }
911 else if (is_taskreg_ctx (ctx))
912 {
913 bool by_ref = use_pointer_for_field (var, NULL);
914 x = build_receiver_ref (var, by_ref, ctx);
915 }
916 else if (ctx->outer)
917 x = lookup_decl (var, ctx->outer);
918 else if (is_reference (var))
919 /* This can happen with orphaned constructs. If var is reference, it is
920 possible it is shared and as such valid. */
921 x = var;
922 else
923 gcc_unreachable ();
924
925 if (is_reference (var))
926 x = build_simple_mem_ref (x);
927
928 return x;
929 }
930
931 /* Build tree nodes to access the field for VAR on the sender side. */
932
933 static tree
934 build_sender_ref (tree var, omp_context *ctx)
935 {
936 tree field = lookup_sfield (var, ctx);
937 return omp_build_component_ref (ctx->sender_decl, field);
938 }
939
940 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
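
/* MASK selects which record receives the field: bit 0 requests an entry in
   CTX->RECORD_TYPE / CTX->FIELD_MAP (the receiver side), bit 1 an entry in
   CTX->SRECORD_TYPE / CTX->SFIELD_MAP (the sender side used by the task
   copy function); a MASK of 3 requests both.  */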
941
942 static void
943 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
944 {
945 tree field, type, sfield = NULL_TREE;
946
947 gcc_assert ((mask & 1) == 0
948 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
949 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
950 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
951
952 type = TREE_TYPE (var);
953 if (by_ref)
954 type = build_pointer_type (type);
955 else if ((mask & 3) == 1 && is_reference (var))
956 type = TREE_TYPE (type);
957
958 field = build_decl (DECL_SOURCE_LOCATION (var),
959 FIELD_DECL, DECL_NAME (var), type);
960
961 /* Remember what variable this field was created for. This does have a
962 side effect of making dwarf2out ignore this member, so for helpful
963 debugging we clear it later in delete_omp_context. */
964 DECL_ABSTRACT_ORIGIN (field) = var;
965 if (type == TREE_TYPE (var))
966 {
967 DECL_ALIGN (field) = DECL_ALIGN (var);
968 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
969 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
970 }
971 else
972 DECL_ALIGN (field) = TYPE_ALIGN (type);
973
974 if ((mask & 3) == 3)
975 {
976 insert_field_into_struct (ctx->record_type, field);
977 if (ctx->srecord_type)
978 {
979 sfield = build_decl (DECL_SOURCE_LOCATION (var),
980 FIELD_DECL, DECL_NAME (var), type);
981 DECL_ABSTRACT_ORIGIN (sfield) = var;
982 DECL_ALIGN (sfield) = DECL_ALIGN (field);
983 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
984 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
985 insert_field_into_struct (ctx->srecord_type, sfield);
986 }
987 }
988 else
989 {
990 if (ctx->srecord_type == NULL_TREE)
991 {
992 tree t;
993
994 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
995 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
996 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
997 {
998 sfield = build_decl (DECL_SOURCE_LOCATION (var),
999 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
1000 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
1001 insert_field_into_struct (ctx->srecord_type, sfield);
1002 splay_tree_insert (ctx->sfield_map,
1003 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
1004 (splay_tree_value) sfield);
1005 }
1006 }
1007 sfield = field;
1008 insert_field_into_struct ((mask & 1) ? ctx->record_type
1009 : ctx->srecord_type, field);
1010 }
1011
1012 if (mask & 1)
1013 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
1014 (splay_tree_value) field);
1015 if ((mask & 2) && ctx->sfield_map)
1016 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1017 (splay_tree_value) sfield);
1018 }
1019
1020 static tree
1021 install_var_local (tree var, omp_context *ctx)
1022 {
1023 tree new_var = omp_copy_decl_1 (var, ctx);
1024 insert_decl_map (&ctx->cb, var, new_var);
1025 return new_var;
1026 }
1027
1028 /* Adjust the replacement for DECL in CTX for the new context. This means
1029 copying the DECL_VALUE_EXPR, and fixing up the type. */
1030
1031 static void
1032 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1033 {
1034 tree new_decl, size;
1035
1036 new_decl = lookup_decl (decl, ctx);
1037
1038 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1039
1040 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1041 && DECL_HAS_VALUE_EXPR_P (decl))
1042 {
1043 tree ve = DECL_VALUE_EXPR (decl);
1044 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1045 SET_DECL_VALUE_EXPR (new_decl, ve);
1046 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1047 }
1048
1049 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1050 {
1051 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1052 if (size == error_mark_node)
1053 size = TYPE_SIZE (TREE_TYPE (new_decl));
1054 DECL_SIZE (new_decl) = size;
1055
1056 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1057 if (size == error_mark_node)
1058 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1059 DECL_SIZE_UNIT (new_decl) = size;
1060 }
1061 }
1062
1063 /* The callback for remap_decl. Search all containing contexts for a
1064 mapping of the variable; this avoids having to duplicate the splay
1065 tree ahead of time. We know a mapping doesn't already exist in the
1066 given context. Create new mappings to implement default semantics. */
1067
1068 static tree
1069 omp_copy_decl (tree var, copy_body_data *cb)
1070 {
1071 omp_context *ctx = (omp_context *) cb;
1072 tree new_var;
1073
1074 if (TREE_CODE (var) == LABEL_DECL)
1075 {
1076 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1077 DECL_CONTEXT (new_var) = current_function_decl;
1078 insert_decl_map (&ctx->cb, var, new_var);
1079 return new_var;
1080 }
1081
1082 while (!is_taskreg_ctx (ctx))
1083 {
1084 ctx = ctx->outer;
1085 if (ctx == NULL)
1086 return var;
1087 new_var = maybe_lookup_decl (var, ctx);
1088 if (new_var)
1089 return new_var;
1090 }
1091
1092 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1093 return var;
1094
1095 return error_mark_node;
1096 }
1097
1098
1099 /* Return the parallel region associated with STMT. */
1100
1101 /* Debugging dumps for parallel regions. */
1102 void dump_omp_region (FILE *, struct omp_region *, int);
1103 void debug_omp_region (struct omp_region *);
1104 void debug_all_omp_regions (void);
1105
1106 /* Dump the parallel region tree rooted at REGION. */
1107
1108 void
1109 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1110 {
1111 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1112 gimple_code_name[region->type]);
1113
1114 if (region->inner)
1115 dump_omp_region (file, region->inner, indent + 4);
1116
1117 if (region->cont)
1118 {
1119 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1120 region->cont->index);
1121 }
1122
1123 if (region->exit)
1124 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1125 region->exit->index);
1126 else
1127 fprintf (file, "%*s[no exit marker]\n", indent, "");
1128
1129 if (region->next)
1130 dump_omp_region (file, region->next, indent);
1131 }
1132
1133 DEBUG_FUNCTION void
1134 debug_omp_region (struct omp_region *region)
1135 {
1136 dump_omp_region (stderr, region, 0);
1137 }
1138
1139 DEBUG_FUNCTION void
1140 debug_all_omp_regions (void)
1141 {
1142 dump_omp_region (stderr, root_omp_region, 0);
1143 }
1144
1145
1146 /* Create a new parallel region starting at STMT inside region PARENT. */
1147
1148 struct omp_region *
1149 new_omp_region (basic_block bb, enum gimple_code type,
1150 struct omp_region *parent)
1151 {
1152 struct omp_region *region = XCNEW (struct omp_region);
1153
1154 region->outer = parent;
1155 region->entry = bb;
1156 region->type = type;
1157
1158 if (parent)
1159 {
1160 /* This is a nested region. Add it to the list of inner
1161 regions in PARENT. */
1162 region->next = parent->inner;
1163 parent->inner = region;
1164 }
1165 else
1166 {
1167 /* This is a toplevel region. Add it to the list of toplevel
1168 regions in ROOT_OMP_REGION. */
1169 region->next = root_omp_region;
1170 root_omp_region = region;
1171 }
1172
1173 return region;
1174 }
1175
1176 /* Release the memory associated with the region tree rooted at REGION. */
1177
1178 static void
1179 free_omp_region_1 (struct omp_region *region)
1180 {
1181 struct omp_region *i, *n;
1182
1183 for (i = region->inner; i ; i = n)
1184 {
1185 n = i->next;
1186 free_omp_region_1 (i);
1187 }
1188
1189 free (region);
1190 }
1191
1192 /* Release the memory for the entire omp region tree. */
1193
1194 void
1195 free_omp_regions (void)
1196 {
1197 struct omp_region *r, *n;
1198 for (r = root_omp_region; r ; r = n)
1199 {
1200 n = r->next;
1201 free_omp_region_1 (r);
1202 }
1203 root_omp_region = NULL;
1204 }
1205
1206
1207 /* Create a new context, with OUTER_CTX being the surrounding context. */
1208
1209 static omp_context *
1210 new_omp_context (gimple stmt, omp_context *outer_ctx)
1211 {
1212 omp_context *ctx = XCNEW (omp_context);
1213
1214 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1215 (splay_tree_value) ctx);
1216 ctx->stmt = stmt;
1217
1218 if (outer_ctx)
1219 {
1220 ctx->outer = outer_ctx;
1221 ctx->cb = outer_ctx->cb;
1222 ctx->cb.block = NULL;
1223 ctx->depth = outer_ctx->depth + 1;
1224 }
1225 else
1226 {
1227 ctx->cb.src_fn = current_function_decl;
1228 ctx->cb.dst_fn = current_function_decl;
1229 ctx->cb.src_node = cgraph_get_node (current_function_decl);
1230 gcc_checking_assert (ctx->cb.src_node);
1231 ctx->cb.dst_node = ctx->cb.src_node;
1232 ctx->cb.src_cfun = cfun;
1233 ctx->cb.copy_decl = omp_copy_decl;
1234 ctx->cb.eh_lp_nr = 0;
1235 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1236 ctx->depth = 1;
1237 }
1238
1239 ctx->cb.decl_map = pointer_map_create ();
1240
1241 return ctx;
1242 }
1243
1244 static gimple_seq maybe_catch_exception (gimple_seq);
1245
1246 /* Finalize task copyfn. */
1247
1248 static void
1249 finalize_task_copyfn (gimple task_stmt)
1250 {
1251 struct function *child_cfun;
1252 tree child_fn;
1253 gimple_seq seq = NULL, new_seq;
1254 gimple bind;
1255
1256 child_fn = gimple_omp_task_copy_fn (task_stmt);
1257 if (child_fn == NULL_TREE)
1258 return;
1259
1260 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1261
1262 /* Inform the callgraph about the new function. */
1263 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1264 = cfun->curr_properties & ~PROP_loops;
1265
1266 push_cfun (child_cfun);
1267 bind = gimplify_body (child_fn, false);
1268 gimple_seq_add_stmt (&seq, bind);
1269 new_seq = maybe_catch_exception (seq);
1270 if (new_seq != seq)
1271 {
1272 bind = gimple_build_bind (NULL, new_seq, NULL);
1273 seq = NULL;
1274 gimple_seq_add_stmt (&seq, bind);
1275 }
1276 gimple_set_body (child_fn, seq);
1277 pop_cfun ();
1278
1279 cgraph_add_new_function (child_fn, false);
1280 }
1281
1282 /* Destroy an omp_context data structure. Called through the splay tree
1283 value delete callback. */
1284
1285 static void
1286 delete_omp_context (splay_tree_value value)
1287 {
1288 omp_context *ctx = (omp_context *) value;
1289
1290 pointer_map_destroy (ctx->cb.decl_map);
1291
1292 if (ctx->field_map)
1293 splay_tree_delete (ctx->field_map);
1294 if (ctx->sfield_map)
1295 splay_tree_delete (ctx->sfield_map);
1296
1297 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it again
1298 here, otherwise it produces corrupt debug information. */
1299 if (ctx->record_type)
1300 {
1301 tree t;
1302 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1303 DECL_ABSTRACT_ORIGIN (t) = NULL;
1304 }
1305 if (ctx->srecord_type)
1306 {
1307 tree t;
1308 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1309 DECL_ABSTRACT_ORIGIN (t) = NULL;
1310 }
1311
1312 if (is_task_ctx (ctx))
1313 finalize_task_copyfn (ctx->stmt);
1314
1315 XDELETE (ctx);
1316 }
1317
1318 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1319 context. */
1320
1321 static void
1322 fixup_child_record_type (omp_context *ctx)
1323 {
1324 tree f, type = ctx->record_type;
1325
1326 /* ??? It isn't sufficient to just call remap_type here, because
1327 variably_modified_type_p doesn't work the way we expect for
1328 record types. Testing each field for whether it needs remapping
1329 and creating a new record by hand works, however. */
1330 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1331 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1332 break;
1333 if (f)
1334 {
1335 tree name, new_fields = NULL;
1336
1337 type = lang_hooks.types.make_type (RECORD_TYPE);
1338 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1339 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1340 TYPE_DECL, name, type);
1341 TYPE_NAME (type) = name;
1342
1343 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1344 {
1345 tree new_f = copy_node (f);
1346 DECL_CONTEXT (new_f) = type;
1347 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1348 DECL_CHAIN (new_f) = new_fields;
1349 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1350 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1351 &ctx->cb, NULL);
1352 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1353 &ctx->cb, NULL);
1354 new_fields = new_f;
1355
1356 /* Arrange to be able to look up the receiver field
1357 given the sender field. */
1358 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1359 (splay_tree_value) new_f);
1360 }
1361 TYPE_FIELDS (type) = nreverse (new_fields);
1362 layout_type (type);
1363 }
1364
1365 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1366 }
1367
1368 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1369 specified by CLAUSES. */
1370
1371 static void
1372 scan_sharing_clauses (tree clauses, omp_context *ctx)
1373 {
1374 tree c, decl;
1375 bool scan_array_reductions = false;
1376
1377 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1378 {
1379 bool by_ref;
1380
1381 switch (OMP_CLAUSE_CODE (c))
1382 {
1383 case OMP_CLAUSE_PRIVATE:
1384 decl = OMP_CLAUSE_DECL (c);
1385 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1386 goto do_private;
1387 else if (!is_variable_sized (decl))
1388 install_var_local (decl, ctx);
1389 break;
1390
1391 case OMP_CLAUSE_SHARED:
1392 gcc_assert (is_taskreg_ctx (ctx));
1393 decl = OMP_CLAUSE_DECL (c);
1394 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1395 || !is_variable_sized (decl));
1396 /* Global variables don't need to be copied,
1397 the receiver side will use them directly. */
1398 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1399 break;
1400 by_ref = use_pointer_for_field (decl, ctx);
1401 if (! TREE_READONLY (decl)
1402 || TREE_ADDRESSABLE (decl)
1403 || by_ref
1404 || is_reference (decl))
1405 {
1406 install_var_field (decl, by_ref, 3, ctx);
1407 install_var_local (decl, ctx);
1408 break;
1409 }
1410 /* We don't need to copy const scalar vars back. */
1411 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1412 goto do_private;
1413
1414 case OMP_CLAUSE_LASTPRIVATE:
1415 /* Let the corresponding firstprivate clause create
1416 the variable. */
1417 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1418 break;
1419 /* FALLTHRU */
1420
1421 case OMP_CLAUSE_FIRSTPRIVATE:
1422 case OMP_CLAUSE_REDUCTION:
1423 decl = OMP_CLAUSE_DECL (c);
1424 do_private:
1425 if (is_variable_sized (decl))
1426 {
1427 if (is_task_ctx (ctx))
1428 install_var_field (decl, false, 1, ctx);
1429 break;
1430 }
1431 else if (is_taskreg_ctx (ctx))
1432 {
1433 bool global
1434 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1435 by_ref = use_pointer_for_field (decl, NULL);
1436
1437 if (is_task_ctx (ctx)
1438 && (global || by_ref || is_reference (decl)))
1439 {
1440 install_var_field (decl, false, 1, ctx);
1441 if (!global)
1442 install_var_field (decl, by_ref, 2, ctx);
1443 }
1444 else if (!global)
1445 install_var_field (decl, by_ref, 3, ctx);
1446 }
1447 install_var_local (decl, ctx);
1448 break;
1449
1450 case OMP_CLAUSE_COPYPRIVATE:
1451 case OMP_CLAUSE_COPYIN:
1452 decl = OMP_CLAUSE_DECL (c);
1453 by_ref = use_pointer_for_field (decl, NULL);
1454 install_var_field (decl, by_ref, 3, ctx);
1455 break;
1456
1457 case OMP_CLAUSE_DEFAULT:
1458 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1459 break;
1460
1461 case OMP_CLAUSE_FINAL:
1462 case OMP_CLAUSE_IF:
1463 case OMP_CLAUSE_NUM_THREADS:
1464 case OMP_CLAUSE_SCHEDULE:
1465 if (ctx->outer)
1466 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1467 break;
1468
1469 case OMP_CLAUSE_NOWAIT:
1470 case OMP_CLAUSE_ORDERED:
1471 case OMP_CLAUSE_COLLAPSE:
1472 case OMP_CLAUSE_UNTIED:
1473 case OMP_CLAUSE_MERGEABLE:
1474 break;
1475
1476 default:
1477 gcc_unreachable ();
1478 }
1479 }
1480
1481 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1482 {
1483 switch (OMP_CLAUSE_CODE (c))
1484 {
1485 case OMP_CLAUSE_LASTPRIVATE:
1486 /* Let the corresponding firstprivate clause create
1487 the variable. */
1488 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1489 scan_array_reductions = true;
1490 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1491 break;
1492 /* FALLTHRU */
1493
1494 case OMP_CLAUSE_PRIVATE:
1495 case OMP_CLAUSE_FIRSTPRIVATE:
1496 case OMP_CLAUSE_REDUCTION:
1497 decl = OMP_CLAUSE_DECL (c);
1498 if (is_variable_sized (decl))
1499 install_var_local (decl, ctx);
1500 fixup_remapped_decl (decl, ctx,
1501 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1502 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1503 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1504 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1505 scan_array_reductions = true;
1506 break;
1507
1508 case OMP_CLAUSE_SHARED:
1509 decl = OMP_CLAUSE_DECL (c);
1510 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1511 fixup_remapped_decl (decl, ctx, false);
1512 break;
1513
1514 case OMP_CLAUSE_COPYPRIVATE:
1515 case OMP_CLAUSE_COPYIN:
1516 case OMP_CLAUSE_DEFAULT:
1517 case OMP_CLAUSE_IF:
1518 case OMP_CLAUSE_NUM_THREADS:
1519 case OMP_CLAUSE_SCHEDULE:
1520 case OMP_CLAUSE_NOWAIT:
1521 case OMP_CLAUSE_ORDERED:
1522 case OMP_CLAUSE_COLLAPSE:
1523 case OMP_CLAUSE_UNTIED:
1524 case OMP_CLAUSE_FINAL:
1525 case OMP_CLAUSE_MERGEABLE:
1526 break;
1527
1528 default:
1529 gcc_unreachable ();
1530 }
1531 }
1532
1533 if (scan_array_reductions)
1534 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1535 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1536 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1537 {
1538 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1539 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1540 }
1541 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1542 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1543 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1544 }
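
/* As an example of the two scans above (a sketch): for
   "#pragma omp parallel shared (a) firstprivate (b) private (c)" the first
   pass installs record fields and local copies for "a" and "b" but only a
   local copy for "c"; the second pass then fixes up the remapped decls,
   e.g. copying DECL_VALUE_EXPR and the size trees of variable-sized
   variables.  */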
1545
1546 /* Create a new name for omp child function. Returns an identifier. */
1547
1548 static GTY(()) unsigned int tmp_ompfn_id_num;
1549
1550 static tree
1551 create_omp_child_function_name (bool task_copy)
1552 {
1553 return (clone_function_name (current_function_decl,
1554 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1555 }
1556
1557 /* Build a decl for the omp child function. It will not contain a body
1558 yet, just the bare decl. */
1559
1560 static void
1561 create_omp_child_function (omp_context *ctx, bool task_copy)
1562 {
1563 tree decl, type, name, t;
1564
1565 name = create_omp_child_function_name (task_copy);
1566 if (task_copy)
1567 type = build_function_type_list (void_type_node, ptr_type_node,
1568 ptr_type_node, NULL_TREE);
1569 else
1570 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1571
1572 decl = build_decl (gimple_location (ctx->stmt),
1573 FUNCTION_DECL, name, type);
1574
1575 if (!task_copy)
1576 ctx->cb.dst_fn = decl;
1577 else
1578 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1579
1580 TREE_STATIC (decl) = 1;
1581 TREE_USED (decl) = 1;
1582 DECL_ARTIFICIAL (decl) = 1;
1583 DECL_NAMELESS (decl) = 1;
1584 DECL_IGNORED_P (decl) = 0;
1585 TREE_PUBLIC (decl) = 0;
1586 DECL_UNINLINABLE (decl) = 1;
1587 DECL_EXTERNAL (decl) = 0;
1588 DECL_CONTEXT (decl) = NULL_TREE;
1589 DECL_INITIAL (decl) = make_node (BLOCK);
1590
1591 t = build_decl (DECL_SOURCE_LOCATION (decl),
1592 RESULT_DECL, NULL_TREE, void_type_node);
1593 DECL_ARTIFICIAL (t) = 1;
1594 DECL_IGNORED_P (t) = 1;
1595 DECL_CONTEXT (t) = decl;
1596 DECL_RESULT (decl) = t;
1597
1598 t = build_decl (DECL_SOURCE_LOCATION (decl),
1599 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1600 DECL_ARTIFICIAL (t) = 1;
1601 DECL_NAMELESS (t) = 1;
1602 DECL_ARG_TYPE (t) = ptr_type_node;
1603 DECL_CONTEXT (t) = current_function_decl;
1604 TREE_USED (t) = 1;
1605 DECL_ARGUMENTS (decl) = t;
1606 if (!task_copy)
1607 ctx->receiver_decl = t;
1608 else
1609 {
1610 t = build_decl (DECL_SOURCE_LOCATION (decl),
1611 PARM_DECL, get_identifier (".omp_data_o"),
1612 ptr_type_node);
1613 DECL_ARTIFICIAL (t) = 1;
1614 DECL_NAMELESS (t) = 1;
1615 DECL_ARG_TYPE (t) = ptr_type_node;
1616 DECL_CONTEXT (t) = current_function_decl;
1617 TREE_USED (t) = 1;
1618 TREE_ADDRESSABLE (t) = 1;
1619 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1620 DECL_ARGUMENTS (decl) = t;
1621 }
1622
1623 /* Allocate memory for the function structure. The call to
1624 allocate_struct_function clobbers CFUN, so we need to restore
1625 it afterward. */
1626 push_struct_function (decl);
1627 cfun->function_end_locus = gimple_location (ctx->stmt);
1628 pop_cfun ();
1629 }
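
/* The resulting decl corresponds to a function of the shape (a sketch)

	static void foo._omp_fn.0 (void *.omp_data_i);

   or, for a task copy function, one with an additional ".omp_data_o"
   pointer argument; the body is filled in later by the lowering and
   expansion passes.  */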
1630
1631
1632 /* Scan an OpenMP parallel directive. */
1633
1634 static void
1635 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1636 {
1637 omp_context *ctx;
1638 tree name;
1639 gimple stmt = gsi_stmt (*gsi);
1640
1641 /* Ignore parallel directives with empty bodies, unless there
1642 are copyin clauses. */
1643 if (optimize > 0
1644 && empty_body_p (gimple_omp_body (stmt))
1645 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1646 OMP_CLAUSE_COPYIN) == NULL)
1647 {
1648 gsi_replace (gsi, gimple_build_nop (), false);
1649 return;
1650 }
1651
1652 ctx = new_omp_context (stmt, outer_ctx);
1653 if (taskreg_nesting_level > 1)
1654 ctx->is_nested = true;
1655 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1656 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1657 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1658 name = create_tmp_var_name (".omp_data_s");
1659 name = build_decl (gimple_location (stmt),
1660 TYPE_DECL, name, ctx->record_type);
1661 DECL_ARTIFICIAL (name) = 1;
1662 DECL_NAMELESS (name) = 1;
1663 TYPE_NAME (ctx->record_type) = name;
1664 create_omp_child_function (ctx, false);
1665 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1666
1667 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1668 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1669
1670 if (TYPE_FIELDS (ctx->record_type) == NULL)
1671 ctx->record_type = ctx->receiver_decl = NULL;
1672 else
1673 {
1674 layout_type (ctx->record_type);
1675 fixup_child_record_type (ctx);
1676 }
1677 }
1678
1679 /* Scan an OpenMP task directive. */
1680
1681 static void
1682 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1683 {
1684 omp_context *ctx;
1685 tree name, t;
1686 gimple stmt = gsi_stmt (*gsi);
1687 location_t loc = gimple_location (stmt);
1688
1689 /* Ignore task directives with empty bodies. */
1690 if (optimize > 0
1691 && empty_body_p (gimple_omp_body (stmt)))
1692 {
1693 gsi_replace (gsi, gimple_build_nop (), false);
1694 return;
1695 }
1696
1697 ctx = new_omp_context (stmt, outer_ctx);
1698 if (taskreg_nesting_level > 1)
1699 ctx->is_nested = true;
1700 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1701 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1702 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1703 name = create_tmp_var_name (".omp_data_s");
1704 name = build_decl (gimple_location (stmt),
1705 TYPE_DECL, name, ctx->record_type);
1706 DECL_ARTIFICIAL (name) = 1;
1707 DECL_NAMELESS (name) = 1;
1708 TYPE_NAME (ctx->record_type) = name;
1709 create_omp_child_function (ctx, false);
1710 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1711
1712 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1713
1714 if (ctx->srecord_type)
1715 {
1716 name = create_tmp_var_name (".omp_data_a");
1717 name = build_decl (gimple_location (stmt),
1718 TYPE_DECL, name, ctx->srecord_type);
1719 DECL_ARTIFICIAL (name) = 1;
1720 DECL_NAMELESS (name) = 1;
1721 TYPE_NAME (ctx->srecord_type) = name;
1722 create_omp_child_function (ctx, true);
1723 }
1724
1725 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1726
1727 if (TYPE_FIELDS (ctx->record_type) == NULL)
1728 {
1729 ctx->record_type = ctx->receiver_decl = NULL;
1730 t = build_int_cst (long_integer_type_node, 0);
1731 gimple_omp_task_set_arg_size (stmt, t);
1732 t = build_int_cst (long_integer_type_node, 1);
1733 gimple_omp_task_set_arg_align (stmt, t);
1734 }
1735 else
1736 {
1737 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1738 /* Move VLA fields to the end. */
1739 p = &TYPE_FIELDS (ctx->record_type);
1740 while (*p)
1741 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1742 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1743 {
1744 *q = *p;
1745 *p = TREE_CHAIN (*p);
1746 TREE_CHAIN (*q) = NULL_TREE;
1747 q = &TREE_CHAIN (*q);
1748 }
1749 else
1750 p = &DECL_CHAIN (*p);
1751 *p = vla_fields;
1752 layout_type (ctx->record_type);
1753 fixup_child_record_type (ctx);
1754 if (ctx->srecord_type)
1755 layout_type (ctx->srecord_type);
1756 t = fold_convert_loc (loc, long_integer_type_node,
1757 TYPE_SIZE_UNIT (ctx->record_type));
1758 gimple_omp_task_set_arg_size (stmt, t);
1759 t = build_int_cst (long_integer_type_node,
1760 TYPE_ALIGN_UNIT (ctx->record_type));
1761 gimple_omp_task_set_arg_align (stmt, t);
1762 }
1763 }
1764
1765
1766 /* Scan an OpenMP loop directive. */
1767
1768 static void
1769 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1770 {
1771 omp_context *ctx;
1772 size_t i;
1773
1774 ctx = new_omp_context (stmt, outer_ctx);
1775
1776 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1777
1778 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
1779 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1780 {
1781 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1782 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1783 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1784 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1785 }
1786 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1787 }
1788
1789 /* Scan an OpenMP sections directive. */
1790
1791 static void
1792 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1793 {
1794 omp_context *ctx;
1795
1796 ctx = new_omp_context (stmt, outer_ctx);
1797 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1798 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1799 }
1800
1801 /* Scan an OpenMP single directive. */
1802
1803 static void
1804 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1805 {
1806 omp_context *ctx;
1807 tree name;
1808
1809 ctx = new_omp_context (stmt, outer_ctx);
1810 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1811 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1812 name = create_tmp_var_name (".omp_copy_s");
1813 name = build_decl (gimple_location (stmt),
1814 TYPE_DECL, name, ctx->record_type);
1815 TYPE_NAME (ctx->record_type) = name;
1816
1817 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1818 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1819
1820 if (TYPE_FIELDS (ctx->record_type) == NULL)
1821 ctx->record_type = NULL;
1822 else
1823 layout_type (ctx->record_type);
1824 }
1825
1826
1827 /* Check OpenMP nesting restrictions. */
1828 static bool
1829 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1830 {
1831 switch (gimple_code (stmt))
1832 {
1833 case GIMPLE_OMP_FOR:
1834 case GIMPLE_OMP_SECTIONS:
1835 case GIMPLE_OMP_SINGLE:
1836 case GIMPLE_CALL:
1837 for (; ctx != NULL; ctx = ctx->outer)
1838 switch (gimple_code (ctx->stmt))
1839 {
1840 case GIMPLE_OMP_FOR:
1841 case GIMPLE_OMP_SECTIONS:
1842 case GIMPLE_OMP_SINGLE:
1843 case GIMPLE_OMP_ORDERED:
1844 case GIMPLE_OMP_MASTER:
1845 case GIMPLE_OMP_TASK:
1846 if (is_gimple_call (stmt))
1847 {
1848 error_at (gimple_location (stmt),
1849 "barrier region may not be closely nested inside "
1850 "of work-sharing, critical, ordered, master or "
1851 "explicit task region");
1852 return false;
1853 }
1854 error_at (gimple_location (stmt),
1855 "work-sharing region may not be closely nested inside "
1856 "of work-sharing, critical, ordered, master or explicit "
1857 "task region");
1858 return false;
1859 case GIMPLE_OMP_PARALLEL:
1860 return true;
1861 default:
1862 break;
1863 }
1864 break;
1865 case GIMPLE_OMP_MASTER:
1866 for (; ctx != NULL; ctx = ctx->outer)
1867 switch (gimple_code (ctx->stmt))
1868 {
1869 case GIMPLE_OMP_FOR:
1870 case GIMPLE_OMP_SECTIONS:
1871 case GIMPLE_OMP_SINGLE:
1872 case GIMPLE_OMP_TASK:
1873 error_at (gimple_location (stmt),
1874 "master region may not be closely nested inside "
1875 "of work-sharing or explicit task region");
1876 return false;
1877 case GIMPLE_OMP_PARALLEL:
1878 return true;
1879 default:
1880 break;
1881 }
1882 break;
1883 case GIMPLE_OMP_ORDERED:
1884 for (; ctx != NULL; ctx = ctx->outer)
1885 switch (gimple_code (ctx->stmt))
1886 {
1887 case GIMPLE_OMP_CRITICAL:
1888 case GIMPLE_OMP_TASK:
1889 error_at (gimple_location (stmt),
1890 "ordered region may not be closely nested inside "
1891 "of critical or explicit task region");
1892 return false;
1893 case GIMPLE_OMP_FOR:
1894 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1895 OMP_CLAUSE_ORDERED) == NULL)
1896 {
1897 error_at (gimple_location (stmt),
1898 "ordered region must be closely nested inside "
1899 "a loop region with an ordered clause");
1900 return false;
1901 }
1902 return true;
1903 case GIMPLE_OMP_PARALLEL:
1904 return true;
1905 default:
1906 break;
1907 }
1908 break;
1909 case GIMPLE_OMP_CRITICAL:
1910 for (; ctx != NULL; ctx = ctx->outer)
1911 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1912 && (gimple_omp_critical_name (stmt)
1913 == gimple_omp_critical_name (ctx->stmt)))
1914 {
1915 error_at (gimple_location (stmt),
1916 "critical region may not be nested inside a critical "
1917 "region with the same name");
1918 return false;
1919 }
1920 break;
1921 default:
1922 break;
1923 }
1924 return true;
1925 }
1926
1927
1928 /* Helper function for scan_omp.
1929
1930 Callback for walk_tree, and for operands in walk_gimple_stmt, used
1931 to scan for OpenMP directives in TP. */
1932
1933 static tree
1934 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1935 {
1936 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1937 omp_context *ctx = (omp_context *) wi->info;
1938 tree t = *tp;
1939
1940 switch (TREE_CODE (t))
1941 {
1942 case VAR_DECL:
1943 case PARM_DECL:
1944 case LABEL_DECL:
1945 case RESULT_DECL:
1946 if (ctx)
1947 *tp = remap_decl (t, &ctx->cb);
1948 break;
1949
1950 default:
1951 if (ctx && TYPE_P (t))
1952 *tp = remap_type (t, &ctx->cb);
1953 else if (!DECL_P (t))
1954 {
1955 *walk_subtrees = 1;
1956 if (ctx)
1957 {
1958 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1959 if (tem != TREE_TYPE (t))
1960 {
1961 if (TREE_CODE (t) == INTEGER_CST)
1962 *tp = build_int_cst_wide (tem,
1963 TREE_INT_CST_LOW (t),
1964 TREE_INT_CST_HIGH (t));
1965 else
1966 TREE_TYPE (t) = tem;
1967 }
1968 }
1969 }
1970 break;
1971 }
1972
1973 return NULL_TREE;
1974 }
1975
1976
1977 /* Helper function for scan_omp.
1978
1979 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1980 the current statement in GSI. */
1981
1982 static tree
1983 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1984 struct walk_stmt_info *wi)
1985 {
1986 gimple stmt = gsi_stmt (*gsi);
1987 omp_context *ctx = (omp_context *) wi->info;
1988
1989 if (gimple_has_location (stmt))
1990 input_location = gimple_location (stmt);
1991
1992 /* Check the OpenMP nesting restrictions. */
1993 if (ctx != NULL)
1994 {
1995 bool remove = false;
1996 if (is_gimple_omp (stmt))
1997 remove = !check_omp_nesting_restrictions (stmt, ctx);
1998 else if (is_gimple_call (stmt))
1999 {
2000 tree fndecl = gimple_call_fndecl (stmt);
2001 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2002 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
2003 remove = !check_omp_nesting_restrictions (stmt, ctx);
2004 }
2005 if (remove)
2006 {
2007 stmt = gimple_build_nop ();
2008 gsi_replace (gsi, stmt, false);
2009 }
2010 }
2011
2012 *handled_ops_p = true;
2013
2014 switch (gimple_code (stmt))
2015 {
2016 case GIMPLE_OMP_PARALLEL:
2017 taskreg_nesting_level++;
2018 scan_omp_parallel (gsi, ctx);
2019 taskreg_nesting_level--;
2020 break;
2021
2022 case GIMPLE_OMP_TASK:
2023 taskreg_nesting_level++;
2024 scan_omp_task (gsi, ctx);
2025 taskreg_nesting_level--;
2026 break;
2027
2028 case GIMPLE_OMP_FOR:
2029 scan_omp_for (stmt, ctx);
2030 break;
2031
2032 case GIMPLE_OMP_SECTIONS:
2033 scan_omp_sections (stmt, ctx);
2034 break;
2035
2036 case GIMPLE_OMP_SINGLE:
2037 scan_omp_single (stmt, ctx);
2038 break;
2039
2040 case GIMPLE_OMP_SECTION:
2041 case GIMPLE_OMP_MASTER:
2042 case GIMPLE_OMP_ORDERED:
2043 case GIMPLE_OMP_CRITICAL:
2044 ctx = new_omp_context (stmt, ctx);
2045 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2046 break;
2047
2048 case GIMPLE_BIND:
2049 {
2050 tree var;
2051
2052 *handled_ops_p = false;
2053 if (ctx)
2054 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2055 insert_decl_map (&ctx->cb, var, var);
2056 }
2057 break;
2058 default:
2059 *handled_ops_p = false;
2060 break;
2061 }
2062
2063 return NULL_TREE;
2064 }
2065
2066
2067 /* Scan all the statements starting at the current statement. CTX
2068 contains context information about the OpenMP directives and
2069 clauses found during the scan. */
2070
2071 static void
2072 scan_omp (gimple_seq *body_p, omp_context *ctx)
2073 {
2074 location_t saved_location;
2075 struct walk_stmt_info wi;
2076
2077 memset (&wi, 0, sizeof (wi));
2078 wi.info = ctx;
2079 wi.want_locations = true;
2080
2081 saved_location = input_location;
2082 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2083 input_location = saved_location;
2084 }
2085 \f
2086 /* Re-gimplification and code generation routines. */
2087
2088 /* Build a call to GOMP_barrier. */
2089
2090 static tree
2091 build_omp_barrier (void)
2092 {
2093 return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
2094 }
2095
2096 /* If a context was created for STMT when it was scanned, return it. */
2097
2098 static omp_context *
2099 maybe_lookup_ctx (gimple stmt)
2100 {
2101 splay_tree_node n;
2102 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2103 return n ? (omp_context *) n->value : NULL;
2104 }
2105
2106
2107 /* Find the mapping for DECL in CTX or the immediately enclosing
2108 context that has a mapping for DECL.
2109
2110 If CTX is a nested parallel directive, we may have to use the decl
2111 mappings created in CTX's parent context. Suppose that we have the
2112 following parallel nesting (variable UIDs shown for clarity):
2113
2114 iD.1562 = 0;
2115 #omp parallel shared(iD.1562) -> outer parallel
2116 iD.1562 = iD.1562 + 1;
2117
2118 #omp parallel shared (iD.1562) -> inner parallel
2119 iD.1562 = iD.1562 - 1;
2120
2121 Each parallel structure will create a distinct .omp_data_s structure
2122 for copying iD.1562 in/out of the directive:
2123
2124 outer parallel .omp_data_s.1.i -> iD.1562
2125 inner parallel .omp_data_s.2.i -> iD.1562
2126
2127 A shared variable mapping will produce a copy-out operation before
2128 the parallel directive and a copy-in operation after it. So, in
2129 this case we would have:
2130
2131 iD.1562 = 0;
2132 .omp_data_o.1.i = iD.1562;
2133 #omp parallel shared(iD.1562) -> outer parallel
2134 .omp_data_i.1 = &.omp_data_o.1
2135 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2136
2137 .omp_data_o.2.i = iD.1562; -> **
2138 #omp parallel shared(iD.1562) -> inner parallel
2139 .omp_data_i.2 = &.omp_data_o.2
2140 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2141
2142
2143 ** This is a problem. The symbol iD.1562 cannot be referenced
2144 inside the body of the outer parallel region. But since we are
2145 emitting this copy operation while expanding the inner parallel
2146 directive, we need to access the CTX structure of the outer
2147 parallel directive to get the correct mapping:
2148
2149 .omp_data_o.2.i = .omp_data_i.1->i
2150
2151 Since there may be other workshare or parallel directives enclosing
2152 the parallel directive, it may be necessary to walk up the context
2153 parent chain. This is not a problem in general because nested
2154 parallelism happens only rarely. */
2155
2156 static tree
2157 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2158 {
2159 tree t;
2160 omp_context *up;
2161
2162 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2163 t = maybe_lookup_decl (decl, up);
2164
2165 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2166
2167 return t ? t : decl;
2168 }
2169
2170
2171 /* Similar to lookup_decl_in_outer_ctx, but do not assert that a
2172 mapping exists; simply return DECL if not found in outer contexts. */
2173
2174 static tree
2175 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2176 {
2177 tree t = NULL;
2178 omp_context *up;
2179
2180 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2181 t = maybe_lookup_decl (decl, up);
2182
2183 return t ? t : decl;
2184 }
2185
2186
2187 /* Construct the initialization value for reduction CLAUSE. */
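/* Informal summary of the cases handled below: the initializer is the
   operation's neutral element, i.e. 0 for +, -, |, ^ and ||; 1 for *
   and &&; ~0 for &; the most negative representable value (or -inf when
   infinities are honored) for max; and the most positive value (or
   +inf) for min.  */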
2188
2189 tree
2190 omp_reduction_init (tree clause, tree type)
2191 {
2192 location_t loc = OMP_CLAUSE_LOCATION (clause);
2193 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2194 {
2195 case PLUS_EXPR:
2196 case MINUS_EXPR:
2197 case BIT_IOR_EXPR:
2198 case BIT_XOR_EXPR:
2199 case TRUTH_OR_EXPR:
2200 case TRUTH_ORIF_EXPR:
2201 case TRUTH_XOR_EXPR:
2202 case NE_EXPR:
2203 return build_zero_cst (type);
2204
2205 case MULT_EXPR:
2206 case TRUTH_AND_EXPR:
2207 case TRUTH_ANDIF_EXPR:
2208 case EQ_EXPR:
2209 return fold_convert_loc (loc, type, integer_one_node);
2210
2211 case BIT_AND_EXPR:
2212 return fold_convert_loc (loc, type, integer_minus_one_node);
2213
2214 case MAX_EXPR:
2215 if (SCALAR_FLOAT_TYPE_P (type))
2216 {
2217 REAL_VALUE_TYPE max, min;
2218 if (HONOR_INFINITIES (TYPE_MODE (type)))
2219 {
2220 real_inf (&max);
2221 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2222 }
2223 else
2224 real_maxval (&min, 1, TYPE_MODE (type));
2225 return build_real (type, min);
2226 }
2227 else
2228 {
2229 gcc_assert (INTEGRAL_TYPE_P (type));
2230 return TYPE_MIN_VALUE (type);
2231 }
2232
2233 case MIN_EXPR:
2234 if (SCALAR_FLOAT_TYPE_P (type))
2235 {
2236 REAL_VALUE_TYPE max;
2237 if (HONOR_INFINITIES (TYPE_MODE (type)))
2238 real_inf (&max);
2239 else
2240 real_maxval (&max, 0, TYPE_MODE (type));
2241 return build_real (type, max);
2242 }
2243 else
2244 {
2245 gcc_assert (INTEGRAL_TYPE_P (type));
2246 return TYPE_MAX_VALUE (type);
2247 }
2248
2249 default:
2250 gcc_unreachable ();
2251 }
2252 }
2253
2254 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2255 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2256 private variables. Initialization statements go in ILIST, while calls
2257 to destructors go in DLIST. */
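/* As a rough illustration (not the exact GIMPLE emitted), for

       #pragma omp parallel firstprivate(x)

   the child-side ILIST ends up initializing the private copy from the
   value received through the .omp_data_i structure, conceptually

       x' = .omp_data_i->x;

   while DLIST collects the destructor calls, if any, needed for x'.  */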
2258
2259 static void
2260 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2261 omp_context *ctx)
2262 {
2263 tree c, dtor, copyin_seq, x, ptr;
2264 bool copyin_by_ref = false;
2265 bool lastprivate_firstprivate = false;
2266 int pass;
2267
2268 copyin_seq = NULL;
2269
2270 /* Do all the fixed sized types in the first pass, and the variable sized
2271 types in the second pass. This makes sure that the scalar arguments to
2272 the variable sized types are processed before we use them in the
2273 variable sized operations. */
2274 for (pass = 0; pass < 2; ++pass)
2275 {
2276 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2277 {
2278 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2279 tree var, new_var;
2280 bool by_ref;
2281 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2282
2283 switch (c_kind)
2284 {
2285 case OMP_CLAUSE_PRIVATE:
2286 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2287 continue;
2288 break;
2289 case OMP_CLAUSE_SHARED:
2290 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2291 {
2292 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2293 continue;
2294 }
2295 case OMP_CLAUSE_FIRSTPRIVATE:
2296 case OMP_CLAUSE_COPYIN:
2297 case OMP_CLAUSE_REDUCTION:
2298 break;
2299 case OMP_CLAUSE_LASTPRIVATE:
2300 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2301 {
2302 lastprivate_firstprivate = true;
2303 if (pass != 0)
2304 continue;
2305 }
2306 break;
2307 default:
2308 continue;
2309 }
2310
2311 new_var = var = OMP_CLAUSE_DECL (c);
2312 if (c_kind != OMP_CLAUSE_COPYIN)
2313 new_var = lookup_decl (var, ctx);
2314
2315 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2316 {
2317 if (pass != 0)
2318 continue;
2319 }
2320 else if (is_variable_sized (var))
2321 {
2322 /* For variable sized types, we need to allocate the
2323 actual storage here. Call alloca and store the
2324 result in the pointer decl that we created elsewhere. */
2325 if (pass == 0)
2326 continue;
2327
2328 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2329 {
2330 gimple stmt;
2331 tree tmp, atmp;
2332
2333 ptr = DECL_VALUE_EXPR (new_var);
2334 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2335 ptr = TREE_OPERAND (ptr, 0);
2336 gcc_assert (DECL_P (ptr));
2337 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2338
2339 /* void *tmp = __builtin_alloca */
2340 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2341 stmt = gimple_build_call (atmp, 1, x);
2342 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2343 gimple_add_tmp_var (tmp);
2344 gimple_call_set_lhs (stmt, tmp);
2345
2346 gimple_seq_add_stmt (ilist, stmt);
2347
2348 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2349 gimplify_assign (ptr, x, ilist);
2350 }
2351 }
2352 else if (is_reference (var))
2353 {
2354 /* For references that are being privatized for Fortran,
2355 allocate new backing storage for the new pointer
2356 variable. This allows us to avoid changing all the
2357 code that expects a pointer to something that expects
2358 a direct variable. Note that this doesn't apply to
2359 C++, since reference types are disallowed in data
2360 sharing clauses there, except for NRV optimized
2361 return values. */
2362 if (pass == 0)
2363 continue;
2364
2365 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2366 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2367 {
2368 x = build_receiver_ref (var, false, ctx);
2369 x = build_fold_addr_expr_loc (clause_loc, x);
2370 }
2371 else if (TREE_CONSTANT (x))
2372 {
2373 const char *name = NULL;
2374 if (DECL_NAME (var))
2375 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2376
2377 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2378 name);
2379 gimple_add_tmp_var (x);
2380 TREE_ADDRESSABLE (x) = 1;
2381 x = build_fold_addr_expr_loc (clause_loc, x);
2382 }
2383 else
2384 {
2385 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2386 x = build_call_expr_loc (clause_loc, atmp, 1, x);
2387 }
2388
2389 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2390 gimplify_assign (new_var, x, ilist);
2391
2392 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2393 }
2394 else if (c_kind == OMP_CLAUSE_REDUCTION
2395 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2396 {
2397 if (pass == 0)
2398 continue;
2399 }
2400 else if (pass != 0)
2401 continue;
2402
2403 switch (OMP_CLAUSE_CODE (c))
2404 {
2405 case OMP_CLAUSE_SHARED:
2406 /* Shared global vars are just accessed directly. */
2407 if (is_global_var (new_var))
2408 break;
2409 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2410 needs to be delayed until after fixup_child_record_type so
2411 that we get the correct type during the dereference. */
2412 by_ref = use_pointer_for_field (var, ctx);
2413 x = build_receiver_ref (var, by_ref, ctx);
2414 SET_DECL_VALUE_EXPR (new_var, x);
2415 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2416
2417 /* ??? If VAR is not passed by reference, and the variable
2418 hasn't been initialized yet, then we'll get a warning for
2419 the store into the omp_data_s structure. Ideally, we'd be
2420 able to notice this and not store anything at all, but
2421 we're generating code too early. Suppress the warning. */
2422 if (!by_ref)
2423 TREE_NO_WARNING (var) = 1;
2424 break;
2425
2426 case OMP_CLAUSE_LASTPRIVATE:
2427 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2428 break;
2429 /* FALLTHRU */
2430
2431 case OMP_CLAUSE_PRIVATE:
2432 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2433 x = build_outer_var_ref (var, ctx);
2434 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2435 {
2436 if (is_task_ctx (ctx))
2437 x = build_receiver_ref (var, false, ctx);
2438 else
2439 x = build_outer_var_ref (var, ctx);
2440 }
2441 else
2442 x = NULL;
2443 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2444 if (x)
2445 gimplify_and_add (x, ilist);
2446 /* FALLTHRU */
2447
2448 do_dtor:
2449 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2450 if (x)
2451 {
2452 gimple_seq tseq = NULL;
2453
2454 dtor = x;
2455 gimplify_stmt (&dtor, &tseq);
2456 gimple_seq_add_seq (dlist, tseq);
2457 }
2458 break;
2459
2460 case OMP_CLAUSE_FIRSTPRIVATE:
2461 if (is_task_ctx (ctx))
2462 {
2463 if (is_reference (var) || is_variable_sized (var))
2464 goto do_dtor;
2465 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2466 ctx))
2467 || use_pointer_for_field (var, NULL))
2468 {
2469 x = build_receiver_ref (var, false, ctx);
2470 SET_DECL_VALUE_EXPR (new_var, x);
2471 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2472 goto do_dtor;
2473 }
2474 }
2475 x = build_outer_var_ref (var, ctx);
2476 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2477 gimplify_and_add (x, ilist);
2478 goto do_dtor;
2479 break;
2480
2481 case OMP_CLAUSE_COPYIN:
2482 by_ref = use_pointer_for_field (var, NULL);
2483 x = build_receiver_ref (var, by_ref, ctx);
2484 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2485 append_to_statement_list (x, &copyin_seq);
2486 copyin_by_ref |= by_ref;
2487 break;
2488
2489 case OMP_CLAUSE_REDUCTION:
2490 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2491 {
2492 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2493 x = build_outer_var_ref (var, ctx);
2494
2495 if (is_reference (var))
2496 x = build_fold_addr_expr_loc (clause_loc, x);
2497 SET_DECL_VALUE_EXPR (placeholder, x);
2498 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2499 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2500 gimple_seq_add_seq (ilist,
2501 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2502 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2503 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2504 }
2505 else
2506 {
2507 x = omp_reduction_init (c, TREE_TYPE (new_var));
2508 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2509 gimplify_assign (new_var, x, ilist);
2510 }
2511 break;
2512
2513 default:
2514 gcc_unreachable ();
2515 }
2516 }
2517 }
2518
2519 /* The copyin sequence is not to be executed by the main thread, since
2520 that would result in self-copies. For scalars that may not be
2521 observable, but for C++ operator= it certainly is. */
2522 if (copyin_seq)
2523 {
2524 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2525 0);
2526 x = build2 (NE_EXPR, boolean_type_node, x,
2527 build_int_cst (TREE_TYPE (x), 0));
2528 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2529 gimplify_and_add (x, ilist);
2530 }
2531
2532 /* If any copyin variable is passed by reference, we must ensure the
2533 master thread doesn't modify it before it is copied over in all
2534 threads. Similarly for variables in both firstprivate and
2535 lastprivate clauses we need to ensure the lastprivate copying
2536 happens after firstprivate copying in all threads. */
2537 if (copyin_by_ref || lastprivate_firstprivate)
2538 gimplify_and_add (build_omp_barrier (), ilist);
2539 }
2540
2541
2542 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2543 both parallel and workshare constructs. PREDICATE may be NULL if it's
2544 always true. */
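/* Sketch of the shape emitted when PREDICATE is non-NULL (the operands
   come from PREDICATE, typically the last-iteration test built by the
   loop expansion):

       if (arm1 <cond> arm2) goto label_true; else goto label;
     label_true:
       outer_var = private_copy;      <- one assignment per clause
     label:

   When PREDICATE is NULL the assignments are emitted unconditionally.  */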
2545
2546 static void
2547 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2548 omp_context *ctx)
2549 {
2550 tree x, c, label = NULL;
2551 bool par_clauses = false;
2552
2553 /* Early exit if there are no lastprivate clauses. */
2554 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2555 if (clauses == NULL)
2556 {
2557 /* If this was a workshare clause, see if it had been combined
2558 with its parallel. In that case, look for the clauses on the
2559 parallel statement itself. */
2560 if (is_parallel_ctx (ctx))
2561 return;
2562
2563 ctx = ctx->outer;
2564 if (ctx == NULL || !is_parallel_ctx (ctx))
2565 return;
2566
2567 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2568 OMP_CLAUSE_LASTPRIVATE);
2569 if (clauses == NULL)
2570 return;
2571 par_clauses = true;
2572 }
2573
2574 if (predicate)
2575 {
2576 gimple stmt;
2577 tree label_true, arm1, arm2;
2578
2579 label = create_artificial_label (UNKNOWN_LOCATION);
2580 label_true = create_artificial_label (UNKNOWN_LOCATION);
2581 arm1 = TREE_OPERAND (predicate, 0);
2582 arm2 = TREE_OPERAND (predicate, 1);
2583 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2584 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2585 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2586 label_true, label);
2587 gimple_seq_add_stmt (stmt_list, stmt);
2588 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2589 }
2590
2591 for (c = clauses; c ;)
2592 {
2593 tree var, new_var;
2594 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2595
2596 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2597 {
2598 var = OMP_CLAUSE_DECL (c);
2599 new_var = lookup_decl (var, ctx);
2600
2601 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2602 {
2603 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2604 gimple_seq_add_seq (stmt_list,
2605 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2606 }
2607 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2608
2609 x = build_outer_var_ref (var, ctx);
2610 if (is_reference (var))
2611 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2612 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2613 gimplify_and_add (x, stmt_list);
2614 }
2615 c = OMP_CLAUSE_CHAIN (c);
2616 if (c == NULL && !par_clauses)
2617 {
2618 /* If this was a workshare clause, see if it had been combined
2619 with its parallel. In that case, continue looking for the
2620 clauses also on the parallel statement itself. */
2621 if (is_parallel_ctx (ctx))
2622 break;
2623
2624 ctx = ctx->outer;
2625 if (ctx == NULL || !is_parallel_ctx (ctx))
2626 break;
2627
2628 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2629 OMP_CLAUSE_LASTPRIVATE);
2630 par_clauses = true;
2631 }
2632 }
2633
2634 if (label)
2635 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2636 }
2637
2638
2639 /* Generate code to implement the REDUCTION clauses. */
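/* In outline (see the COUNT logic below): a single scalar reduction such
   as reduction(+:s) is combined with an atomic update, conceptually

       #pragma omp atomic
       s = s + s';

   whereas multiple reductions, or placeholder (array) reductions, are
   merged between GOMP_atomic_start () and GOMP_atomic_end () calls.  */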
2640
2641 static void
2642 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2643 {
2644 gimple_seq sub_seq = NULL;
2645 gimple stmt;
2646 tree x, c;
2647 int count = 0;
2648
2649 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2650 update in that case, otherwise use a lock. */
2651 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2652 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2653 {
2654 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2655 {
2656 /* Never use OMP_ATOMIC for array reductions. */
2657 count = -1;
2658 break;
2659 }
2660 count++;
2661 }
2662
2663 if (count == 0)
2664 return;
2665
2666 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2667 {
2668 tree var, ref, new_var;
2669 enum tree_code code;
2670 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2671
2672 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2673 continue;
2674
2675 var = OMP_CLAUSE_DECL (c);
2676 new_var = lookup_decl (var, ctx);
2677 if (is_reference (var))
2678 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2679 ref = build_outer_var_ref (var, ctx);
2680 code = OMP_CLAUSE_REDUCTION_CODE (c);
2681
2682 /* reduction(-:var) sums up the partial results, so it acts
2683 identically to reduction(+:var). */
2684 if (code == MINUS_EXPR)
2685 code = PLUS_EXPR;
2686
2687 if (count == 1)
2688 {
2689 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2690
2691 addr = save_expr (addr);
2692 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2693 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2694 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2695 gimplify_and_add (x, stmt_seqp);
2696 return;
2697 }
2698
2699 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2700 {
2701 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2702
2703 if (is_reference (var))
2704 ref = build_fold_addr_expr_loc (clause_loc, ref);
2705 SET_DECL_VALUE_EXPR (placeholder, ref);
2706 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2707 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2708 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2709 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2710 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2711 }
2712 else
2713 {
2714 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2715 ref = build_outer_var_ref (var, ctx);
2716 gimplify_assign (ref, x, &sub_seq);
2717 }
2718 }
2719
2720 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
2721 0);
2722 gimple_seq_add_stmt (stmt_seqp, stmt);
2723
2724 gimple_seq_add_seq (stmt_seqp, sub_seq);
2725
2726 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
2727 0);
2728 gimple_seq_add_stmt (stmt_seqp, stmt);
2729 }
2730
2731
2732 /* Generate code to implement the COPYPRIVATE clauses. */
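/* Informally: for #pragma omp single copyprivate(x), the thread that
   executed the single region stores x (or &x, when passed by reference)
   into the copy structure via SLIST, and every other thread copies the
   value back out into its own x in RLIST using the language hook's
   assignment operator.  */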
2733
2734 static void
2735 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2736 omp_context *ctx)
2737 {
2738 tree c;
2739
2740 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2741 {
2742 tree var, new_var, ref, x;
2743 bool by_ref;
2744 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2745
2746 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2747 continue;
2748
2749 var = OMP_CLAUSE_DECL (c);
2750 by_ref = use_pointer_for_field (var, NULL);
2751
2752 ref = build_sender_ref (var, ctx);
2753 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2754 if (by_ref)
2755 {
2756 x = build_fold_addr_expr_loc (clause_loc, new_var);
2757 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2758 }
2759 gimplify_assign (ref, x, slist);
2760
2761 ref = build_receiver_ref (var, false, ctx);
2762 if (by_ref)
2763 {
2764 ref = fold_convert_loc (clause_loc,
2765 build_pointer_type (TREE_TYPE (new_var)),
2766 ref);
2767 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2768 }
2769 if (is_reference (var))
2770 {
2771 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2772 ref = build_simple_mem_ref_loc (clause_loc, ref);
2773 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2774 }
2775 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2776 gimplify_and_add (x, rlist);
2777 }
2778 }
2779
2780
2781 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2782 and REDUCTION from the sender (aka parent) side. */
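/* Roughly speaking, for each such clause the parent either stores the
   value (or its address, when passed by reference) into the matching
   .omp_data_o field before the region starts (ILIST), copies it back
   out afterwards (OLIST), or both, depending on the clause kind.  */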
2783
2784 static void
2785 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2786 omp_context *ctx)
2787 {
2788 tree c;
2789
2790 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2791 {
2792 tree val, ref, x, var;
2793 bool by_ref, do_in = false, do_out = false;
2794 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2795
2796 switch (OMP_CLAUSE_CODE (c))
2797 {
2798 case OMP_CLAUSE_PRIVATE:
2799 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2800 break;
2801 continue;
2802 case OMP_CLAUSE_FIRSTPRIVATE:
2803 case OMP_CLAUSE_COPYIN:
2804 case OMP_CLAUSE_LASTPRIVATE:
2805 case OMP_CLAUSE_REDUCTION:
2806 break;
2807 default:
2808 continue;
2809 }
2810
2811 val = OMP_CLAUSE_DECL (c);
2812 var = lookup_decl_in_outer_ctx (val, ctx);
2813
2814 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2815 && is_global_var (var))
2816 continue;
2817 if (is_variable_sized (val))
2818 continue;
2819 by_ref = use_pointer_for_field (val, NULL);
2820
2821 switch (OMP_CLAUSE_CODE (c))
2822 {
2823 case OMP_CLAUSE_PRIVATE:
2824 case OMP_CLAUSE_FIRSTPRIVATE:
2825 case OMP_CLAUSE_COPYIN:
2826 do_in = true;
2827 break;
2828
2829 case OMP_CLAUSE_LASTPRIVATE:
2830 if (by_ref || is_reference (val))
2831 {
2832 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2833 continue;
2834 do_in = true;
2835 }
2836 else
2837 {
2838 do_out = true;
2839 if (lang_hooks.decls.omp_private_outer_ref (val))
2840 do_in = true;
2841 }
2842 break;
2843
2844 case OMP_CLAUSE_REDUCTION:
2845 do_in = true;
2846 do_out = !(by_ref || is_reference (val));
2847 break;
2848
2849 default:
2850 gcc_unreachable ();
2851 }
2852
2853 if (do_in)
2854 {
2855 ref = build_sender_ref (val, ctx);
2856 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2857 gimplify_assign (ref, x, ilist);
2858 if (is_task_ctx (ctx))
2859 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2860 }
2861
2862 if (do_out)
2863 {
2864 ref = build_sender_ref (val, ctx);
2865 gimplify_assign (var, ref, olist);
2866 }
2867 }
2868 }
2869
2870 /* Generate code to implement SHARED from the sender (aka parent)
2871 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2872 list things that got automatically shared. */
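/* Conceptually, for each field F of the record type the parent emits

       .omp_data_o.F = var;      <- or &var, when passed by reference
       ... parallel region ...
       var = .omp_data_o.F;      <- copy-back, for by-value fields only

   with the copy-back additionally skipped for read-only variables and
   for by-reference RESULT_DECLs and PARM_DECLs, as explained below.  */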
2873
2874 static void
2875 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2876 {
2877 tree var, ovar, nvar, f, x, record_type;
2878
2879 if (ctx->record_type == NULL)
2880 return;
2881
2882 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2883 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2884 {
2885 ovar = DECL_ABSTRACT_ORIGIN (f);
2886 nvar = maybe_lookup_decl (ovar, ctx);
2887 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2888 continue;
2889
2890 /* If CTX is a nested parallel directive, find the immediately
2891 enclosing parallel or workshare construct that contains a
2892 mapping for OVAR. */
2893 var = lookup_decl_in_outer_ctx (ovar, ctx);
2894
2895 if (use_pointer_for_field (ovar, ctx))
2896 {
2897 x = build_sender_ref (ovar, ctx);
2898 var = build_fold_addr_expr (var);
2899 gimplify_assign (x, var, ilist);
2900 }
2901 else
2902 {
2903 x = build_sender_ref (ovar, ctx);
2904 gimplify_assign (x, var, ilist);
2905
2906 if (!TREE_READONLY (var)
2907 /* We don't need to receive a new reference to a result
2908 or parm decl. In fact we may not store to it as we will
2909 invalidate any pending RSO (return slot optimization) and
2910 generate wrong gimple during inlining. */
2911 && !((TREE_CODE (var) == RESULT_DECL
2912 || TREE_CODE (var) == PARM_DECL)
2913 && DECL_BY_REFERENCE (var)))
2914 {
2915 x = build_sender_ref (ovar, ctx);
2916 gimplify_assign (var, x, olist);
2917 }
2918 }
2919 }
2920 }
2921
2922
2923 /* A convenience function to build an empty GIMPLE_COND with just the
2924 condition. */
2925
2926 static gimple
2927 gimple_build_cond_empty (tree cond)
2928 {
2929 enum tree_code pred_code;
2930 tree lhs, rhs;
2931
2932 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2933 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2934 }
2935
2936
2937 /* Build the function calls to GOMP_parallel_start etc to actually
2938 generate the parallel operation. REGION is the parallel region
2939 being expanded. BB is the block in which to insert the code. WS_ARGS
2940 will be set if this is a call to a combined parallel+workshare
2941 construct, it contains the list of additional arguments needed by
2942 the workshare construct. */
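/* In outline, the sequence emitted at the end of BB is

       GOMP_parallel_start (child_fn, &.omp_data_o, num_threads, ...);
       child_fn (&.omp_data_o);
       GOMP_parallel_end ();

   where the trailing arguments of the start call are the WS_ARGS of a
   combined parallel+workshare construct, and GOMP_parallel_loop_*_start
   or GOMP_parallel_sections_start replaces GOMP_parallel_start for such
   combined constructs.  */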
2943
2944 static void
2945 expand_parallel_call (struct omp_region *region, basic_block bb,
2946 gimple entry_stmt, vec<tree, va_gc> *ws_args)
2947 {
2948 tree t, t1, t2, val, cond, c, clauses;
2949 gimple_stmt_iterator gsi;
2950 gimple stmt;
2951 enum built_in_function start_ix;
2952 int start_ix2;
2953 location_t clause_loc;
2954 vec<tree, va_gc> *args;
2955
2956 clauses = gimple_omp_parallel_clauses (entry_stmt);
2957
2958 /* Determine what flavor of GOMP_parallel_start we will be
2959 emitting. */
2960 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2961 if (is_combined_parallel (region))
2962 {
2963 switch (region->inner->type)
2964 {
2965 case GIMPLE_OMP_FOR:
2966 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2967 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2968 + (region->inner->sched_kind
2969 == OMP_CLAUSE_SCHEDULE_RUNTIME
2970 ? 3 : region->inner->sched_kind));
2971 start_ix = (enum built_in_function)start_ix2;
2972 break;
2973 case GIMPLE_OMP_SECTIONS:
2974 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2975 break;
2976 default:
2977 gcc_unreachable ();
2978 }
2979 }
2980
2981 /* By default, the value of NUM_THREADS is zero (selected at run time)
2982 and there is no conditional. */
2983 cond = NULL_TREE;
2984 val = build_int_cst (unsigned_type_node, 0);
2985
2986 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2987 if (c)
2988 cond = OMP_CLAUSE_IF_EXPR (c);
2989
2990 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2991 if (c)
2992 {
2993 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2994 clause_loc = OMP_CLAUSE_LOCATION (c);
2995 }
2996 else
2997 clause_loc = gimple_location (entry_stmt);
2998
2999 /* Ensure 'val' is of the correct type. */
3000 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
3001
3002 /* If we found the clause 'if (cond)', build either
3003 (cond != 0) or (cond ? val : 1u). */
3004 if (cond)
3005 {
3006 gimple_stmt_iterator gsi;
3007
3008 cond = gimple_boolify (cond);
3009
3010 if (integer_zerop (val))
3011 val = fold_build2_loc (clause_loc,
3012 EQ_EXPR, unsigned_type_node, cond,
3013 build_int_cst (TREE_TYPE (cond), 0));
3014 else
3015 {
3016 basic_block cond_bb, then_bb, else_bb;
3017 edge e, e_then, e_else;
3018 tree tmp_then, tmp_else, tmp_join, tmp_var;
3019
3020 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
3021 if (gimple_in_ssa_p (cfun))
3022 {
3023 tmp_then = make_ssa_name (tmp_var, NULL);
3024 tmp_else = make_ssa_name (tmp_var, NULL);
3025 tmp_join = make_ssa_name (tmp_var, NULL);
3026 }
3027 else
3028 {
3029 tmp_then = tmp_var;
3030 tmp_else = tmp_var;
3031 tmp_join = tmp_var;
3032 }
3033
3034 e = split_block (bb, NULL);
3035 cond_bb = e->src;
3036 bb = e->dest;
3037 remove_edge (e);
3038
3039 then_bb = create_empty_bb (cond_bb);
3040 else_bb = create_empty_bb (then_bb);
3041 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3042 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3043
3044 stmt = gimple_build_cond_empty (cond);
3045 gsi = gsi_start_bb (cond_bb);
3046 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3047
3048 gsi = gsi_start_bb (then_bb);
3049 stmt = gimple_build_assign (tmp_then, val);
3050 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3051
3052 gsi = gsi_start_bb (else_bb);
3053 stmt = gimple_build_assign
3054 (tmp_else, build_int_cst (unsigned_type_node, 1));
3055 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3056
3057 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3058 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3059 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3060 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3061
3062 if (gimple_in_ssa_p (cfun))
3063 {
3064 gimple phi = create_phi_node (tmp_join, bb);
3065 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3066 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3067 }
3068
3069 val = tmp_join;
3070 }
3071
3072 gsi = gsi_start_bb (bb);
3073 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3074 false, GSI_CONTINUE_LINKING);
3075 }
3076
3077 gsi = gsi_last_bb (bb);
3078 t = gimple_omp_parallel_data_arg (entry_stmt);
3079 if (t == NULL)
3080 t1 = null_pointer_node;
3081 else
3082 t1 = build_fold_addr_expr (t);
3083 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3084
3085 vec_alloc (args, 3 + vec_safe_length (ws_args));
3086 args->quick_push (t2);
3087 args->quick_push (t1);
3088 args->quick_push (val);
3089 if (ws_args)
3090 args->splice (*ws_args);
3091
3092 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3093 builtin_decl_explicit (start_ix), args);
3094
3095 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3096 false, GSI_CONTINUE_LINKING);
3097
3098 t = gimple_omp_parallel_data_arg (entry_stmt);
3099 if (t == NULL)
3100 t = null_pointer_node;
3101 else
3102 t = build_fold_addr_expr (t);
3103 t = build_call_expr_loc (gimple_location (entry_stmt),
3104 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3105 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3106 false, GSI_CONTINUE_LINKING);
3107
3108 t = build_call_expr_loc (gimple_location (entry_stmt),
3109 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3110 0);
3111 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3112 false, GSI_CONTINUE_LINKING);
3113 }
3114
3115
3116 /* Build the function call to GOMP_task to actually
3117 generate the task operation. BB is the block in which to insert the code. */
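/* In outline, the call built below is

       GOMP_task (child_fn, &.omp_data_o, copy_fn,
                  arg_size, arg_align, if_cond, flags);

   where FLAGS encodes untied (1), final (2) and mergeable (4) as
   computed from the task's clauses.  */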
3118
3119 static void
3120 expand_task_call (basic_block bb, gimple entry_stmt)
3121 {
3122 tree t, t1, t2, t3, flags, cond, c, c2, clauses;
3123 gimple_stmt_iterator gsi;
3124 location_t loc = gimple_location (entry_stmt);
3125
3126 clauses = gimple_omp_task_clauses (entry_stmt);
3127
3128 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3129 if (c)
3130 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3131 else
3132 cond = boolean_true_node;
3133
3134 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3135 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3136 flags = build_int_cst (unsigned_type_node,
3137 (c ? 1 : 0) + (c2 ? 4 : 0));
3138
3139 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3140 if (c)
3141 {
3142 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3143 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3144 build_int_cst (unsigned_type_node, 2),
3145 build_int_cst (unsigned_type_node, 0));
3146 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3147 }
3148
3149 gsi = gsi_last_bb (bb);
3150 t = gimple_omp_task_data_arg (entry_stmt);
3151 if (t == NULL)
3152 t2 = null_pointer_node;
3153 else
3154 t2 = build_fold_addr_expr_loc (loc, t);
3155 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3156 t = gimple_omp_task_copy_fn (entry_stmt);
3157 if (t == NULL)
3158 t3 = null_pointer_node;
3159 else
3160 t3 = build_fold_addr_expr_loc (loc, t);
3161
3162 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3163 7, t1, t2, t3,
3164 gimple_omp_task_arg_size (entry_stmt),
3165 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3166
3167 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3168 false, GSI_CONTINUE_LINKING);
3169 }
3170
3171
3172 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3173 catch handler and return it. This prevents programs from violating the
3174 structured block semantics with throws. */
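/* The resulting sequence is roughly

       try { BODY } catch <MUST_NOT_THROW: handler>

   where the handler is the language's eh_protect_cleanup_actions hook
   when available and __builtin_trap () otherwise.  */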
3175
3176 static gimple_seq
3177 maybe_catch_exception (gimple_seq body)
3178 {
3179 gimple g;
3180 tree decl;
3181
3182 if (!flag_exceptions)
3183 return body;
3184
3185 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3186 decl = lang_hooks.eh_protect_cleanup_actions ();
3187 else
3188 decl = builtin_decl_explicit (BUILT_IN_TRAP);
3189
3190 g = gimple_build_eh_must_not_throw (decl);
3191 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3192 GIMPLE_TRY_CATCH);
3193
3194 return gimple_seq_alloc_with_stmt (g);
3195 }
3196
3197 /* Chain all the DECLs in V by their DECL_CHAIN fields. */
3198
3199 static tree
3200 vec2chain (vec<tree, va_gc> *v)
3201 {
3202 tree chain = NULL_TREE, t;
3203 unsigned ix;
3204
3205 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
3206 {
3207 DECL_CHAIN (t) = chain;
3208 chain = t;
3209 }
3210
3211 return chain;
3212 }
3213
3214
3215 /* Remove barriers in REGION->EXIT's block. Note that this is only
3216 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3217 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3218 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
3219 removed. */
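/* For instance, in

       #pragma omp parallel
       {
         #pragma omp for
         for (i = 0; i < n; i++)
           ...
       }                               <- implicit barrier here anyway

   the barrier implied at the end of the loop is redundant, so its
   GIMPLE_OMP_RETURN can be marked nowait, unless queued tasks might
   still need to reference addressable locals, as checked below.  */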
3220
3221 static void
3222 remove_exit_barrier (struct omp_region *region)
3223 {
3224 gimple_stmt_iterator gsi;
3225 basic_block exit_bb;
3226 edge_iterator ei;
3227 edge e;
3228 gimple stmt;
3229 int any_addressable_vars = -1;
3230
3231 exit_bb = region->exit;
3232
3233 /* If the parallel region doesn't return, we don't have REGION->EXIT
3234 block at all. */
3235 if (! exit_bb)
3236 return;
3237
3238 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3239 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3240 statements that can appear in between are extremely limited -- no
3241 memory operations at all. Here, we allow nothing at all, so the
3242 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3243 gsi = gsi_last_bb (exit_bb);
3244 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3245 gsi_prev (&gsi);
3246 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3247 return;
3248
3249 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3250 {
3251 gsi = gsi_last_bb (e->src);
3252 if (gsi_end_p (gsi))
3253 continue;
3254 stmt = gsi_stmt (gsi);
3255 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3256 && !gimple_omp_return_nowait_p (stmt))
3257 {
3258 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3259 in many cases. If there could be tasks queued, the barrier
3260 might be needed to let the tasks run before some local
3261 variable of the parallel that the task uses as shared
3262 runs out of scope. The task can be spawned either
3263 from within current function (this would be easy to check)
3264 or from some function it calls and gets passed an address
3265 of such a variable. */
3266 if (any_addressable_vars < 0)
3267 {
3268 gimple parallel_stmt = last_stmt (region->entry);
3269 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3270 tree local_decls, block, decl;
3271 unsigned ix;
3272
3273 any_addressable_vars = 0;
3274 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3275 if (TREE_ADDRESSABLE (decl))
3276 {
3277 any_addressable_vars = 1;
3278 break;
3279 }
3280 for (block = gimple_block (stmt);
3281 !any_addressable_vars
3282 && block
3283 && TREE_CODE (block) == BLOCK;
3284 block = BLOCK_SUPERCONTEXT (block))
3285 {
3286 for (local_decls = BLOCK_VARS (block);
3287 local_decls;
3288 local_decls = DECL_CHAIN (local_decls))
3289 if (TREE_ADDRESSABLE (local_decls))
3290 {
3291 any_addressable_vars = 1;
3292 break;
3293 }
3294 if (block == gimple_block (parallel_stmt))
3295 break;
3296 }
3297 }
3298 if (!any_addressable_vars)
3299 gimple_omp_return_set_nowait (stmt);
3300 }
3301 }
3302 }
3303
3304 static void
3305 remove_exit_barriers (struct omp_region *region)
3306 {
3307 if (region->type == GIMPLE_OMP_PARALLEL)
3308 remove_exit_barrier (region);
3309
3310 if (region->inner)
3311 {
3312 region = region->inner;
3313 remove_exit_barriers (region);
3314 while (region->next)
3315 {
3316 region = region->next;
3317 remove_exit_barriers (region);
3318 }
3319 }
3320 }
3321
3322 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3323 calls. These can't be declared as const functions, but
3324 within one parallel body they are constant, so they can be
3325 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3326 which are declared const. Similarly for task body, except
3327 that in untied task omp_get_thread_num () can change at any task
3328 scheduling point. */
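/* E.g. a call to the external omp_get_num_threads () inside an outlined
   parallel body is redirected to __builtin_omp_get_num_threads (), which
   is declared const and can therefore be CSEd within the region.  */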
3329
3330 static void
3331 optimize_omp_library_calls (gimple entry_stmt)
3332 {
3333 basic_block bb;
3334 gimple_stmt_iterator gsi;
3335 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3336 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3337 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3338 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
3339 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3340 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3341 OMP_CLAUSE_UNTIED) != NULL);
3342
3343 FOR_EACH_BB (bb)
3344 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3345 {
3346 gimple call = gsi_stmt (gsi);
3347 tree decl;
3348
3349 if (is_gimple_call (call)
3350 && (decl = gimple_call_fndecl (call))
3351 && DECL_EXTERNAL (decl)
3352 && TREE_PUBLIC (decl)
3353 && DECL_INITIAL (decl) == NULL)
3354 {
3355 tree built_in;
3356
3357 if (DECL_NAME (decl) == thr_num_id)
3358 {
3359 /* In #pragma omp task untied omp_get_thread_num () can change
3360 during the execution of the task region. */
3361 if (untied_task)
3362 continue;
3363 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3364 }
3365 else if (DECL_NAME (decl) == num_thr_id)
3366 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3367 else
3368 continue;
3369
3370 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3371 || gimple_call_num_args (call) != 0)
3372 continue;
3373
3374 if (flag_exceptions && !TREE_NOTHROW (decl))
3375 continue;
3376
3377 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3378 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3379 TREE_TYPE (TREE_TYPE (built_in))))
3380 continue;
3381
3382 gimple_call_set_fndecl (call, built_in);
3383 }
3384 }
3385 }
3386
3387 /* Expand the OpenMP parallel or task directive starting at REGION. */
3388
3389 static void
3390 expand_omp_taskreg (struct omp_region *region)
3391 {
3392 basic_block entry_bb, exit_bb, new_bb;
3393 struct function *child_cfun;
3394 tree child_fn, block, t;
3395 gimple_stmt_iterator gsi;
3396 gimple entry_stmt, stmt;
3397 edge e;
3398 vec<tree, va_gc> *ws_args;
3399
3400 entry_stmt = last_stmt (region->entry);
3401 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3402 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3403
3404 entry_bb = region->entry;
3405 exit_bb = region->exit;
3406
3407 if (is_combined_parallel (region))
3408 ws_args = region->ws_args;
3409 else
3410 ws_args = NULL;
3411
3412 if (child_cfun->cfg)
3413 {
3414 /* Due to inlining, it may happen that we have already outlined
3415 the region, in which case all we need to do is make the
3416 sub-graph unreachable and emit the parallel call. */
3417 edge entry_succ_e, exit_succ_e;
3418 gimple_stmt_iterator gsi;
3419
3420 entry_succ_e = single_succ_edge (entry_bb);
3421
3422 gsi = gsi_last_bb (entry_bb);
3423 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3424 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3425 gsi_remove (&gsi, true);
3426
3427 new_bb = entry_bb;
3428 if (exit_bb)
3429 {
3430 exit_succ_e = single_succ_edge (exit_bb);
3431 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3432 }
3433 remove_edge_and_dominated_blocks (entry_succ_e);
3434 }
3435 else
3436 {
3437 unsigned srcidx, dstidx, num;
3438
3439 /* If the parallel region needs data sent from the parent
3440 function, then the very first statement (except possible
3441 tree profile counter updates) of the parallel body
3442 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3443 &.OMP_DATA_O is passed as an argument to the child function,
3444 we need to replace it with the argument as seen by the child
3445 function.
3446
3447 In most cases, this will end up being the identity assignment
3448 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3449 a function call that has been inlined, the original PARM_DECL
3450 .OMP_DATA_I may have been converted into a different local
3451 variable, in which case we need to keep the assignment. */
3452 if (gimple_omp_taskreg_data_arg (entry_stmt))
3453 {
3454 basic_block entry_succ_bb = single_succ (entry_bb);
3455 gimple_stmt_iterator gsi;
3456 tree arg, narg;
3457 gimple parcopy_stmt = NULL;
3458
3459 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3460 {
3461 gimple stmt;
3462
3463 gcc_assert (!gsi_end_p (gsi));
3464 stmt = gsi_stmt (gsi);
3465 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3466 continue;
3467
3468 if (gimple_num_ops (stmt) == 2)
3469 {
3470 tree arg = gimple_assign_rhs1 (stmt);
3471
3472 /* We're ignoring the subcode because we're
3473 effectively doing a STRIP_NOPS. */
3474
3475 if (TREE_CODE (arg) == ADDR_EXPR
3476 && TREE_OPERAND (arg, 0)
3477 == gimple_omp_taskreg_data_arg (entry_stmt))
3478 {
3479 parcopy_stmt = stmt;
3480 break;
3481 }
3482 }
3483 }
3484
3485 gcc_assert (parcopy_stmt != NULL);
3486 arg = DECL_ARGUMENTS (child_fn);
3487
3488 if (!gimple_in_ssa_p (cfun))
3489 {
3490 if (gimple_assign_lhs (parcopy_stmt) == arg)
3491 gsi_remove (&gsi, true);
3492 else
3493 {
3494 /* ?? Is setting the subcode really necessary ?? */
3495 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3496 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3497 }
3498 }
3499 else
3500 {
3501 /* If we are in ssa form, we must load the value from the default
3502 definition of the argument. That should not be defined now,
3503 since the argument is not used uninitialized. */
3504 gcc_assert (ssa_default_def (cfun, arg) == NULL);
3505 narg = make_ssa_name (arg, gimple_build_nop ());
3506 set_ssa_default_def (cfun, arg, narg);
3507 /* ?? Is setting the subcode really necessary ?? */
3508 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3509 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3510 update_stmt (parcopy_stmt);
3511 }
3512 }
3513
3514 /* Declare local variables needed in CHILD_CFUN. */
3515 block = DECL_INITIAL (child_fn);
3516 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3517 /* The gimplifier could record temporaries in parallel/task block
3518 rather than in containing function's local_decls chain,
3519 which would mean cgraph missed finalizing them. Do it now. */
3520 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3521 if (TREE_CODE (t) == VAR_DECL
3522 && TREE_STATIC (t)
3523 && !DECL_EXTERNAL (t))
3524 varpool_finalize_decl (t);
3525 DECL_SAVED_TREE (child_fn) = NULL;
3526 /* We'll create a CFG for child_fn, so no gimple body is needed. */
3527 gimple_set_body (child_fn, NULL);
3528 TREE_USED (block) = 1;
3529
3530 /* Reset DECL_CONTEXT on function arguments. */
3531 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3532 DECL_CONTEXT (t) = child_fn;
3533
3534 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3535 so that it can be moved to the child function. */
3536 gsi = gsi_last_bb (entry_bb);
3537 stmt = gsi_stmt (gsi);
3538 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3539 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3540 gsi_remove (&gsi, true);
3541 e = split_block (entry_bb, stmt);
3542 entry_bb = e->dest;
3543 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3544
3545 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3546 if (exit_bb)
3547 {
3548 gsi = gsi_last_bb (exit_bb);
3549 gcc_assert (!gsi_end_p (gsi)
3550 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3551 stmt = gimple_build_return (NULL);
3552 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3553 gsi_remove (&gsi, true);
3554 }
3555
3556 /* Move the parallel region into CHILD_CFUN. */
3557
3558 if (gimple_in_ssa_p (cfun))
3559 {
3560 init_tree_ssa (child_cfun);
3561 init_ssa_operands (child_cfun);
3562 child_cfun->gimple_df->in_ssa_p = true;
3563 block = NULL_TREE;
3564 }
3565 else
3566 block = gimple_block (entry_stmt);
3567
3568 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3569 if (exit_bb)
3570 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3571
3572 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3573 num = vec_safe_length (child_cfun->local_decls);
3574 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3575 {
3576 t = (*child_cfun->local_decls)[srcidx];
3577 if (DECL_CONTEXT (t) == cfun->decl)
3578 continue;
3579 if (srcidx != dstidx)
3580 (*child_cfun->local_decls)[dstidx] = t;
3581 dstidx++;
3582 }
3583 if (dstidx != num)
3584 vec_safe_truncate (child_cfun->local_decls, dstidx);
3585
3586 /* Inform the callgraph about the new function. */
3587 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3588 = cfun->curr_properties & ~PROP_loops;
3589 cgraph_add_new_function (child_fn, true);
3590
3591 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3592 fixed in a following pass. */
3593 push_cfun (child_cfun);
3594 if (optimize)
3595 optimize_omp_library_calls (entry_stmt);
3596 rebuild_cgraph_edges ();
3597
3598 /* Some EH regions might become dead, see PR34608. If
3599 pass_cleanup_cfg isn't the first pass to happen with the
3600 new child, these dead EH edges might cause problems.
3601 Clean them up now. */
3602 if (flag_exceptions)
3603 {
3604 basic_block bb;
3605 bool changed = false;
3606
3607 FOR_EACH_BB (bb)
3608 changed |= gimple_purge_dead_eh_edges (bb);
3609 if (changed)
3610 cleanup_tree_cfg ();
3611 }
3612 if (gimple_in_ssa_p (cfun))
3613 update_ssa (TODO_update_ssa);
3614 pop_cfun ();
3615 }
3616
3617 /* Emit a library call to launch the children threads. */
3618 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3619 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3620 else
3621 expand_task_call (new_bb, entry_stmt);
3622 if (gimple_in_ssa_p (cfun))
3623 update_ssa (TODO_update_ssa_only_virtuals);
3624 }
3625
3626
3627 /* A subroutine of expand_omp_for. Generate code for a parallel
3628 loop with any schedule. Given parameters:
3629
3630 for (V = N1; V cond N2; V += STEP) BODY;
3631
3632 where COND is "<" or ">", we generate pseudocode
3633
3634 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3635 if (more) goto L0; else goto L3;
3636 L0:
3637 V = istart0;
3638 iend = iend0;
3639 L1:
3640 BODY;
3641 V += STEP;
3642 if (V cond iend) goto L1; else goto L2;
3643 L2:
3644 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3645 L3:
3646
3647 If this is a combined omp parallel loop, instead of the call to
3648 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3649
3650 For collapsed loops, given parameters:
3651 collapse(3)
3652 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3653 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3654 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3655 BODY;
3656
3657 we generate pseudocode
3658
3659 if (cond3 is <)
3660 adj = STEP3 - 1;
3661 else
3662 adj = STEP3 + 1;
3663 count3 = (adj + N32 - N31) / STEP3;
3664 if (cond2 is <)
3665 adj = STEP2 - 1;
3666 else
3667 adj = STEP2 + 1;
3668 count2 = (adj + N22 - N21) / STEP2;
3669 if (cond1 is <)
3670 adj = STEP1 - 1;
3671 else
3672 adj = STEP1 + 1;
3673 count1 = (adj + N12 - N11) / STEP1;
3674 count = count1 * count2 * count3;
3675 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3676 if (more) goto L0; else goto L3;
3677 L0:
3678 V = istart0;
3679 T = V;
3680 V3 = N31 + (T % count3) * STEP3;
3681 T = T / count3;
3682 V2 = N21 + (T % count2) * STEP2;
3683 T = T / count2;
3684 V1 = N11 + T * STEP1;
3685 iend = iend0;
3686 L1:
3687 BODY;
3688 V += 1;
3689 if (V < iend) goto L10; else goto L2;
3690 L10:
3691 V3 += STEP3;
3692 if (V3 cond3 N32) goto L1; else goto L11;
3693 L11:
3694 V3 = N31;
3695 V2 += STEP2;
3696 if (V2 cond2 N22) goto L1; else goto L12;
3697 L12:
3698 V2 = N21;
3699 V1 += STEP1;
3700 goto L1;
3701 L2:
3702 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3703 L3:
3704
3705 */
3706
3707 static void
3708 expand_omp_for_generic (struct omp_region *region,
3709 struct omp_for_data *fd,
3710 enum built_in_function start_fn,
3711 enum built_in_function next_fn)
3712 {
3713 tree type, istart0, iend0, iend;
3714 tree t, vmain, vback, bias = NULL_TREE;
3715 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3716 basic_block l2_bb = NULL, l3_bb = NULL;
3717 gimple_stmt_iterator gsi;
3718 gimple stmt;
3719 bool in_combined_parallel = is_combined_parallel (region);
3720 bool broken_loop = region->cont == NULL;
3721 edge e, ne;
3722 tree *counts = NULL;
3723 int i;
3724
3725 gcc_assert (!broken_loop || !in_combined_parallel);
3726 gcc_assert (fd->iter_type == long_integer_type_node
3727 || !in_combined_parallel);
3728
3729 type = TREE_TYPE (fd->loop.v);
3730 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3731 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3732 TREE_ADDRESSABLE (istart0) = 1;
3733 TREE_ADDRESSABLE (iend0) = 1;
3734
3735 /* See if we need to bias by LLONG_MIN. */
3736 if (fd->iter_type == long_long_unsigned_type_node
3737 && TREE_CODE (type) == INTEGER_TYPE
3738 && !TYPE_UNSIGNED (type))
3739 {
3740 tree n1, n2;
3741
3742 if (fd->loop.cond_code == LT_EXPR)
3743 {
3744 n1 = fd->loop.n1;
3745 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3746 }
3747 else
3748 {
3749 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3750 n2 = fd->loop.n1;
3751 }
3752 if (TREE_CODE (n1) != INTEGER_CST
3753 || TREE_CODE (n2) != INTEGER_CST
3754 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3755 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3756 }
3757
3758 entry_bb = region->entry;
3759 cont_bb = region->cont;
3760 collapse_bb = NULL;
3761 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3762 gcc_assert (broken_loop
3763 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3764 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3765 l1_bb = single_succ (l0_bb);
3766 if (!broken_loop)
3767 {
3768 l2_bb = create_empty_bb (cont_bb);
3769 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3770 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3771 }
3772 else
3773 l2_bb = NULL;
3774 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3775 exit_bb = region->exit;
3776
3777 gsi = gsi_last_bb (entry_bb);
3778
3779 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3780 if (fd->collapse > 1)
3781 {
3782 /* Collapsed loops need work for expansion in SSA form. */
3783 gcc_assert (!gimple_in_ssa_p (cfun));
3784 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3785 for (i = 0; i < fd->collapse; i++)
3786 {
3787 tree itype = TREE_TYPE (fd->loops[i].v);
3788
3789 if (POINTER_TYPE_P (itype))
3790 itype = signed_type_for (itype);
3791 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3792 ? -1 : 1));
3793 t = fold_build2 (PLUS_EXPR, itype,
3794 fold_convert (itype, fd->loops[i].step), t);
3795 t = fold_build2 (PLUS_EXPR, itype, t,
3796 fold_convert (itype, fd->loops[i].n2));
3797 t = fold_build2 (MINUS_EXPR, itype, t,
3798 fold_convert (itype, fd->loops[i].n1));
3799 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3800 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3801 fold_build1 (NEGATE_EXPR, itype, t),
3802 fold_build1 (NEGATE_EXPR, itype,
3803 fold_convert (itype,
3804 fd->loops[i].step)));
3805 else
3806 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3807 fold_convert (itype, fd->loops[i].step));
3808 t = fold_convert (type, t);
3809 if (TREE_CODE (t) == INTEGER_CST)
3810 counts[i] = t;
3811 else
3812 {
3813 counts[i] = create_tmp_reg (type, ".count");
3814 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3815 true, GSI_SAME_STMT);
3816 stmt = gimple_build_assign (counts[i], t);
3817 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3818 }
3819 if (SSA_VAR_P (fd->loop.n2))
3820 {
3821 if (i == 0)
3822 t = counts[0];
3823 else
3824 {
3825 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3826 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3827 true, GSI_SAME_STMT);
3828 }
3829 stmt = gimple_build_assign (fd->loop.n2, t);
3830 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3831 }
3832 }
3833 }
3834 if (in_combined_parallel)
3835 {
3836 /* In a combined parallel loop, emit a call to
3837 GOMP_loop_foo_next. */
3838 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
3839 build_fold_addr_expr (istart0),
3840 build_fold_addr_expr (iend0));
3841 }
3842 else
3843 {
3844 tree t0, t1, t2, t3, t4;
3845 /* If this is not a combined parallel loop, emit a call to
3846 GOMP_loop_foo_start in ENTRY_BB. */
3847 t4 = build_fold_addr_expr (iend0);
3848 t3 = build_fold_addr_expr (istart0);
3849 t2 = fold_convert (fd->iter_type, fd->loop.step);
3850 if (POINTER_TYPE_P (type)
3851 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3852 {
3853 /* Avoid casting pointers to integer of a different size. */
3854 tree itype = signed_type_for (type);
3855 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3856 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3857 }
3858 else
3859 {
3860 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3861 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3862 }
3863 if (bias)
3864 {
3865 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3866 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3867 }
3868 if (fd->iter_type == long_integer_type_node)
3869 {
3870 if (fd->chunk_size)
3871 {
3872 t = fold_convert (fd->iter_type, fd->chunk_size);
3873 t = build_call_expr (builtin_decl_explicit (start_fn),
3874 6, t0, t1, t2, t, t3, t4);
3875 }
3876 else
3877 t = build_call_expr (builtin_decl_explicit (start_fn),
3878 5, t0, t1, t2, t3, t4);
3879 }
3880 else
3881 {
3882 tree t5;
3883 tree c_bool_type;
3884 tree bfn_decl;
3885
3886 /* The GOMP_loop_ull_*start functions have an additional boolean
3887 argument, true for < loops and false for > loops.
3888 In Fortran, the C bool type can be different from
3889 boolean_type_node. */
3890 bfn_decl = builtin_decl_explicit (start_fn);
3891 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
3892 t5 = build_int_cst (c_bool_type,
3893 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3894 if (fd->chunk_size)
3895 {
3896 tree bfn_decl = builtin_decl_explicit (start_fn);
3897 t = fold_convert (fd->iter_type, fd->chunk_size);
3898 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
3899 }
3900 else
3901 t = build_call_expr (builtin_decl_explicit (start_fn),
3902 6, t5, t0, t1, t2, t3, t4);
3903 }
3904 }
3905 if (TREE_TYPE (t) != boolean_type_node)
3906 t = fold_build2 (NE_EXPR, boolean_type_node,
3907 t, build_int_cst (TREE_TYPE (t), 0));
3908 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3909 true, GSI_SAME_STMT);
3910 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3911
3912 /* Remove the GIMPLE_OMP_FOR statement. */
3913 gsi_remove (&gsi, true);
3914
3915 /* Iteration setup for sequential loop goes in L0_BB. */
3916 gsi = gsi_start_bb (l0_bb);
3917 t = istart0;
3918 if (bias)
3919 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3920 if (POINTER_TYPE_P (type))
3921 t = fold_convert (signed_type_for (type), t);
3922 t = fold_convert (type, t);
3923 t = force_gimple_operand_gsi (&gsi, t,
3924 DECL_P (fd->loop.v)
3925 && TREE_ADDRESSABLE (fd->loop.v),
3926 NULL_TREE, false, GSI_CONTINUE_LINKING);
3927 stmt = gimple_build_assign (fd->loop.v, t);
3928 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3929
3930 t = iend0;
3931 if (bias)
3932 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3933 if (POINTER_TYPE_P (type))
3934 t = fold_convert (signed_type_for (type), t);
3935 t = fold_convert (type, t);
3936 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3937 false, GSI_CONTINUE_LINKING);
3938 if (fd->collapse > 1)
3939 {
3940 tree tem = create_tmp_reg (type, ".tem");
3941 stmt = gimple_build_assign (tem, fd->loop.v);
3942 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3943 for (i = fd->collapse - 1; i >= 0; i--)
3944 {
3945 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3946 itype = vtype;
3947 if (POINTER_TYPE_P (vtype))
3948 itype = signed_type_for (vtype);
3949 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3950 t = fold_convert (itype, t);
3951 t = fold_build2 (MULT_EXPR, itype, t,
3952 fold_convert (itype, fd->loops[i].step));
3953 if (POINTER_TYPE_P (vtype))
3954 t = fold_build_pointer_plus (fd->loops[i].n1, t);
3955 else
3956 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3957 t = force_gimple_operand_gsi (&gsi, t,
3958 DECL_P (fd->loops[i].v)
3959 && TREE_ADDRESSABLE (fd->loops[i].v),
3960 NULL_TREE, false,
3961 GSI_CONTINUE_LINKING);
3962 stmt = gimple_build_assign (fd->loops[i].v, t);
3963 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3964 if (i != 0)
3965 {
3966 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3967 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3968 false, GSI_CONTINUE_LINKING);
3969 stmt = gimple_build_assign (tem, t);
3970 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3971 }
3972 }
3973 }
3974
3975 if (!broken_loop)
3976 {
3977 /* Code to control the increment and predicate for the sequential
3978 loop goes in the CONT_BB. */
3979 gsi = gsi_last_bb (cont_bb);
3980 stmt = gsi_stmt (gsi);
3981 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3982 vmain = gimple_omp_continue_control_use (stmt);
3983 vback = gimple_omp_continue_control_def (stmt);
3984
3985 if (POINTER_TYPE_P (type))
3986 t = fold_build_pointer_plus (vmain, fd->loop.step);
3987 else
3988 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3989 t = force_gimple_operand_gsi (&gsi, t,
3990 DECL_P (vback) && TREE_ADDRESSABLE (vback),
3991 NULL_TREE, true, GSI_SAME_STMT);
3992 stmt = gimple_build_assign (vback, t);
3993 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3994
3995 t = build2 (fd->loop.cond_code, boolean_type_node,
3996 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
3997 iend);
3998 stmt = gimple_build_cond_empty (t);
3999 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4000
4001 /* Remove GIMPLE_OMP_CONTINUE. */
4002 gsi_remove (&gsi, true);
4003
4004 if (fd->collapse > 1)
4005 {
4006 basic_block last_bb, bb;
4007
4008 last_bb = cont_bb;
4009 for (i = fd->collapse - 1; i >= 0; i--)
4010 {
4011 tree vtype = TREE_TYPE (fd->loops[i].v);
4012
4013 bb = create_empty_bb (last_bb);
4014 gsi = gsi_start_bb (bb);
4015
4016 if (i < fd->collapse - 1)
4017 {
4018 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
4019 e->probability = REG_BR_PROB_BASE / 8;
4020
4021 t = fd->loops[i + 1].n1;
4022 t = force_gimple_operand_gsi (&gsi, t,
4023 DECL_P (fd->loops[i + 1].v)
4024 && TREE_ADDRESSABLE
4025 (fd->loops[i + 1].v),
4026 NULL_TREE, false,
4027 GSI_CONTINUE_LINKING);
4028 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4029 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4030 }
4031 else
4032 collapse_bb = bb;
4033
4034 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4035
4036 if (POINTER_TYPE_P (vtype))
4037 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
4038 else
4039 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
4040 fd->loops[i].step);
4041 t = force_gimple_operand_gsi (&gsi, t,
4042 DECL_P (fd->loops[i].v)
4043 && TREE_ADDRESSABLE (fd->loops[i].v),
4044 NULL_TREE, false,
4045 GSI_CONTINUE_LINKING);
4046 stmt = gimple_build_assign (fd->loops[i].v, t);
4047 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4048
4049 if (i > 0)
4050 {
4051 t = fd->loops[i].n2;
4052 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4053 false, GSI_CONTINUE_LINKING);
4054 tree v = fd->loops[i].v;
4055 if (DECL_P (v) && TREE_ADDRESSABLE (v))
4056 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
4057 false, GSI_CONTINUE_LINKING);
4058 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4059 v, t);
4060 stmt = gimple_build_cond_empty (t);
4061 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4062 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4063 e->probability = REG_BR_PROB_BASE * 7 / 8;
4064 }
4065 else
4066 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4067 last_bb = bb;
4068 }
4069 }
4070
4071 /* Emit code to get the next parallel iteration in L2_BB. */
4072 gsi = gsi_start_bb (l2_bb);
4073
4074 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4075 build_fold_addr_expr (istart0),
4076 build_fold_addr_expr (iend0));
4077 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4078 false, GSI_CONTINUE_LINKING);
4079 if (TREE_TYPE (t) != boolean_type_node)
4080 t = fold_build2 (NE_EXPR, boolean_type_node,
4081 t, build_int_cst (TREE_TYPE (t), 0));
4082 stmt = gimple_build_cond_empty (t);
4083 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4084 }
4085
4086 /* Add the loop cleanup function. */
4087 gsi = gsi_last_bb (exit_bb);
4088 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4089 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
4090 else
4091 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
4092 stmt = gimple_build_call (t, 0);
4093 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4094 gsi_remove (&gsi, true);
4095
4096 /* Connect the new blocks. */
4097 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4098 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4099
4100 if (!broken_loop)
4101 {
4102 gimple_seq phis;
4103
4104 e = find_edge (cont_bb, l3_bb);
4105 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4106
4107 phis = phi_nodes (l3_bb);
4108 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4109 {
4110 gimple phi = gsi_stmt (gsi);
4111 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4112 PHI_ARG_DEF_FROM_EDGE (phi, e));
4113 }
4114 remove_edge (e);
4115
4116 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4117 if (fd->collapse > 1)
4118 {
4119 e = find_edge (cont_bb, l1_bb);
4120 remove_edge (e);
4121 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4122 }
4123 else
4124 {
4125 e = find_edge (cont_bb, l1_bb);
4126 e->flags = EDGE_TRUE_VALUE;
4127 }
4128 e->probability = REG_BR_PROB_BASE * 7 / 8;
4129 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4130 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4131
4132 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4133 recompute_dominator (CDI_DOMINATORS, l2_bb));
4134 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4135 recompute_dominator (CDI_DOMINATORS, l3_bb));
4136 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4137 recompute_dominator (CDI_DOMINATORS, l0_bb));
4138 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4139 recompute_dominator (CDI_DOMINATORS, l1_bb));
4140 }
4141 }
4142
4143
4144 /* A subroutine of expand_omp_for. Generate code for a parallel
4145 loop with static schedule and no specified chunk size. Given
4146 parameters:
4147
4148 for (V = N1; V cond N2; V += STEP) BODY;
4149
4150 where COND is "<" or ">", we generate pseudocode
4151
4152 if (cond is <)
4153 adj = STEP - 1;
4154 else
4155 adj = STEP + 1;
4156 if ((__typeof (V)) -1 > 0 && cond is >)
4157 n = -(adj + N2 - N1) / -STEP;
4158 else
4159 n = (adj + N2 - N1) / STEP;
4160 q = n / nthreads;
4161 tt = n % nthreads;
4162 if (threadid < tt) goto L3; else goto L4;
4163 L3:
4164 tt = 0;
4165 q = q + 1;
4166 L4:
4167 s0 = q * threadid + tt;
4168 e0 = s0 + q;
4169 V = s0 * STEP + N1;
4170 if (s0 >= e0) goto L2; else goto L0;
4171 L0:
4172 e = e0 * STEP + N1;
4173 L1:
4174 BODY;
4175 V += STEP;
4176 if (V cond e) goto L1;
4177 L2:
4178 */
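/* Worked example of the partitioning above (numbers purely illustrative):
   with n = 10 iterations and nthreads = 4 we get q = 2 and tt = 2.
   Threads 0 and 1 satisfy threadid < tt, so each takes q + 1 = 3
   iterations; threads 2 and 3 take 2 each, yielding the contiguous ranges
   [0,3), [3,6), [6,8) and [8,10).  */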
4179
4180 static void
4181 expand_omp_for_static_nochunk (struct omp_region *region,
4182 struct omp_for_data *fd)
4183 {
4184 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
4185 tree type, itype, vmain, vback;
4186 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4187 basic_block body_bb, cont_bb;
4188 basic_block fin_bb;
4189 gimple_stmt_iterator gsi;
4190 gimple stmt;
4191 edge ep;
4192
4193 itype = type = TREE_TYPE (fd->loop.v);
4194 if (POINTER_TYPE_P (type))
4195 itype = signed_type_for (type);
4196
4197 entry_bb = region->entry;
4198 cont_bb = region->cont;
4199 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4200 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4201 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4202 body_bb = single_succ (seq_start_bb);
4203 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4204 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4205 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4206 exit_bb = region->exit;
4207
4208 /* Iteration space partitioning goes in ENTRY_BB. */
4209 gsi = gsi_last_bb (entry_bb);
4210 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4211
4212 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4213 t = fold_convert (itype, t);
4214 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4215 true, GSI_SAME_STMT);
4216
4217 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4218 t = fold_convert (itype, t);
4219 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4220 true, GSI_SAME_STMT);
4221
4222 fd->loop.n1
4223 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4224 true, NULL_TREE, true, GSI_SAME_STMT);
4225 fd->loop.n2
4226 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4227 true, NULL_TREE, true, GSI_SAME_STMT);
4228 fd->loop.step
4229 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4230 true, NULL_TREE, true, GSI_SAME_STMT);
4231
4232 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4233 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4234 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4235 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4236 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4237 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4238 fold_build1 (NEGATE_EXPR, itype, t),
4239 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4240 else
4241 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4242 t = fold_convert (itype, t);
4243 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4244
4245 q = create_tmp_reg (itype, "q");
4246 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4247 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4248 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4249
4250 tt = create_tmp_reg (itype, "tt");
4251 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4252 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4253 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
4254
4255 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4256 stmt = gimple_build_cond_empty (t);
4257 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4258
4259 second_bb = split_block (entry_bb, stmt)->dest;
4260 gsi = gsi_last_bb (second_bb);
4261 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4262
4263 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4264 GSI_SAME_STMT);
4265 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4266 build_int_cst (itype, 1));
4267 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4268
4269 third_bb = split_block (second_bb, stmt)->dest;
4270 gsi = gsi_last_bb (third_bb);
4271 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4272
4273 t = build2 (MULT_EXPR, itype, q, threadid);
4274 t = build2 (PLUS_EXPR, itype, t, tt);
4275 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4276
4277 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4278 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4279
4280 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4281 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4282
4283 /* Remove the GIMPLE_OMP_FOR statement. */
4284 gsi_remove (&gsi, true);
4285
4286 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4287 gsi = gsi_start_bb (seq_start_bb);
4288
4289 t = fold_convert (itype, s0);
4290 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4291 if (POINTER_TYPE_P (type))
4292 t = fold_build_pointer_plus (fd->loop.n1, t);
4293 else
4294 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4295 t = force_gimple_operand_gsi (&gsi, t,
4296 DECL_P (fd->loop.v)
4297 && TREE_ADDRESSABLE (fd->loop.v),
4298 NULL_TREE, false, GSI_CONTINUE_LINKING);
4299 stmt = gimple_build_assign (fd->loop.v, t);
4300 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4301
4302 t = fold_convert (itype, e0);
4303 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4304 if (POINTER_TYPE_P (type))
4305 t = fold_build_pointer_plus (fd->loop.n1, t);
4306 else
4307 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4308 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4309 false, GSI_CONTINUE_LINKING);
4310
4311 /* The code controlling the sequential loop replaces the
4312 GIMPLE_OMP_CONTINUE. */
4313 gsi = gsi_last_bb (cont_bb);
4314 stmt = gsi_stmt (gsi);
4315 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4316 vmain = gimple_omp_continue_control_use (stmt);
4317 vback = gimple_omp_continue_control_def (stmt);
4318
4319 if (POINTER_TYPE_P (type))
4320 t = fold_build_pointer_plus (vmain, fd->loop.step);
4321 else
4322 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4323 t = force_gimple_operand_gsi (&gsi, t,
4324 DECL_P (vback) && TREE_ADDRESSABLE (vback),
4325 NULL_TREE, true, GSI_SAME_STMT);
4326 stmt = gimple_build_assign (vback, t);
4327 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4328
4329 t = build2 (fd->loop.cond_code, boolean_type_node,
4330 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e);
4331 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4332
4333 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4334 gsi_remove (&gsi, true);
4335
4336 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4337 gsi = gsi_last_bb (exit_bb);
4338 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4339 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4340 false, GSI_SAME_STMT);
4341 gsi_remove (&gsi, true);
4342
4343 /* Connect all the blocks. */
4344 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
4345 ep->probability = REG_BR_PROB_BASE / 4 * 3;
4346 ep = find_edge (entry_bb, second_bb);
4347 ep->flags = EDGE_TRUE_VALUE;
4348 ep->probability = REG_BR_PROB_BASE / 4;
4349 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4350 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4351
4352 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4353 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4354
4355 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
4356 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
4357 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
4358 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4359 recompute_dominator (CDI_DOMINATORS, body_bb));
4360 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4361 recompute_dominator (CDI_DOMINATORS, fin_bb));
4362 }
4363
4364
4365 /* A subroutine of expand_omp_for. Generate code for a parallel
4366 loop with static schedule and a specified chunk size. Given
4367 parameters:
4368
4369 for (V = N1; V cond N2; V += STEP) BODY;
4370
4371 where COND is "<" or ">", we generate pseudocode
4372
4373 if (cond is <)
4374 adj = STEP - 1;
4375 else
4376 adj = STEP + 1;
4377 if ((__typeof (V)) -1 > 0 && cond is >)
4378 n = -(adj + N2 - N1) / -STEP;
4379 else
4380 n = (adj + N2 - N1) / STEP;
4381 trip = 0;
4382 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4383 here so that V is defined
4384 if the loop is not entered
4385 L0:
4386 s0 = (trip * nthreads + threadid) * CHUNK;
4387 e0 = min(s0 + CHUNK, n);
4388 if (s0 < n) goto L1; else goto L4;
4389 L1:
4390 V = s0 * STEP + N1;
4391 e = e0 * STEP + N1;
4392 L2:
4393 BODY;
4394 V += STEP;
4395 if (V cond e) goto L2; else goto L3;
4396 L3:
4397 trip += 1;
4398 goto L0;
4399 L4:
4400 */
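/* Worked example of the chunked partitioning above (numbers purely
   illustrative): with n = 10, nthreads = 2 and CHUNK = 3, trip 0 gives
   thread 0 the range [0,3) and thread 1 the range [3,6); trip 1 gives
   thread 0 [6,9) and thread 1 [9,10); on trip 2 both threads compute
   s0 >= n and branch to L4.  Chunks are thus dealt out round-robin,
   CHUNK logical iterations at a time.  */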
4401
4402 static void
4403 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4404 {
4405 tree n, s0, e0, e, t;
4406 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4407 tree type, itype, v_main, v_back, v_extra;
4408 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4409 basic_block trip_update_bb, cont_bb, fin_bb;
4410 gimple_stmt_iterator si;
4411 gimple stmt;
4412 edge se;
4413
4414 itype = type = TREE_TYPE (fd->loop.v);
4415 if (POINTER_TYPE_P (type))
4416 itype = signed_type_for (type);
4417
4418 entry_bb = region->entry;
4419 se = split_block (entry_bb, last_stmt (entry_bb));
4420 entry_bb = se->src;
4421 iter_part_bb = se->dest;
4422 cont_bb = region->cont;
4423 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4424 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4425 == FALLTHRU_EDGE (cont_bb)->dest);
4426 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4427 body_bb = single_succ (seq_start_bb);
4428 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4429 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4430 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4431 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4432 exit_bb = region->exit;
4433
4434 /* Trip and adjustment setup goes in ENTRY_BB. */
4435 si = gsi_last_bb (entry_bb);
4436 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4437
4438 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4439 t = fold_convert (itype, t);
4440 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4441 true, GSI_SAME_STMT);
4442
4443 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4444 t = fold_convert (itype, t);
4445 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4446 true, GSI_SAME_STMT);
4447
4448 fd->loop.n1
4449 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4450 true, NULL_TREE, true, GSI_SAME_STMT);
4451 fd->loop.n2
4452 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4453 true, NULL_TREE, true, GSI_SAME_STMT);
4454 fd->loop.step
4455 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4456 true, NULL_TREE, true, GSI_SAME_STMT);
4457 fd->chunk_size
4458 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4459 true, NULL_TREE, true, GSI_SAME_STMT);
4460
4461 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4462 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4463 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4464 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4465 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4466 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4467 fold_build1 (NEGATE_EXPR, itype, t),
4468 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4469 else
4470 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4471 t = fold_convert (itype, t);
4472 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4473 true, GSI_SAME_STMT);
4474
4475 trip_var = create_tmp_reg (itype, ".trip");
4476 if (gimple_in_ssa_p (cfun))
4477 {
4478 trip_init = make_ssa_name (trip_var, NULL);
4479 trip_main = make_ssa_name (trip_var, NULL);
4480 trip_back = make_ssa_name (trip_var, NULL);
4481 }
4482 else
4483 {
4484 trip_init = trip_var;
4485 trip_main = trip_var;
4486 trip_back = trip_var;
4487 }
4488
4489 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4490 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4491
4492 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4493 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4494 if (POINTER_TYPE_P (type))
4495 t = fold_build_pointer_plus (fd->loop.n1, t);
4496 else
4497 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4498 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4499 true, GSI_SAME_STMT);
4500
4501 /* Remove the GIMPLE_OMP_FOR. */
4502 gsi_remove (&si, true);
4503
4504 /* Iteration space partitioning goes in ITER_PART_BB. */
4505 si = gsi_last_bb (iter_part_bb);
4506
4507 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4508 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4509 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4510 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4511 false, GSI_CONTINUE_LINKING);
4512
4513 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4514 t = fold_build2 (MIN_EXPR, itype, t, n);
4515 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4516 false, GSI_CONTINUE_LINKING);
4517
4518 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4519 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4520
4521 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4522 si = gsi_start_bb (seq_start_bb);
4523
4524 t = fold_convert (itype, s0);
4525 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4526 if (POINTER_TYPE_P (type))
4527 t = fold_build_pointer_plus (fd->loop.n1, t);
4528 else
4529 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4530 t = force_gimple_operand_gsi (&si, t,
4531 DECL_P (fd->loop.v)
4532 && TREE_ADDRESSABLE (fd->loop.v),
4533 NULL_TREE, false, GSI_CONTINUE_LINKING);
4534 stmt = gimple_build_assign (fd->loop.v, t);
4535 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4536
4537 t = fold_convert (itype, e0);
4538 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4539 if (POINTER_TYPE_P (type))
4540 t = fold_build_pointer_plus (fd->loop.n1, t);
4541 else
4542 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4543 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4544 false, GSI_CONTINUE_LINKING);
4545
4546 /* The code controlling the sequential loop goes in CONT_BB,
4547 replacing the GIMPLE_OMP_CONTINUE. */
4548 si = gsi_last_bb (cont_bb);
4549 stmt = gsi_stmt (si);
4550 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4551 v_main = gimple_omp_continue_control_use (stmt);
4552 v_back = gimple_omp_continue_control_def (stmt);
4553
4554 if (POINTER_TYPE_P (type))
4555 t = fold_build_pointer_plus (v_main, fd->loop.step);
4556 else
4557 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4558 if (DECL_P (v_back) && TREE_ADDRESSABLE (v_back))
4559 t = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4560 true, GSI_SAME_STMT);
4561 stmt = gimple_build_assign (v_back, t);
4562 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4563
4564 t = build2 (fd->loop.cond_code, boolean_type_node,
4565 DECL_P (v_back) && TREE_ADDRESSABLE (v_back)
4566 ? t : v_back, e);
4567 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4568
4569 /* Remove GIMPLE_OMP_CONTINUE. */
4570 gsi_remove (&si, true);
4571
4572 /* Trip update code goes into TRIP_UPDATE_BB. */
4573 si = gsi_start_bb (trip_update_bb);
4574
4575 t = build_int_cst (itype, 1);
4576 t = build2 (PLUS_EXPR, itype, trip_main, t);
4577 stmt = gimple_build_assign (trip_back, t);
4578 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4579
4580 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4581 si = gsi_last_bb (exit_bb);
4582 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4583 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4584 false, GSI_SAME_STMT);
4585 gsi_remove (&si, true);
4586
4587 /* Connect the new blocks. */
4588 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4589 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4590
4591 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4592 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4593
4594 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4595
4596 if (gimple_in_ssa_p (cfun))
4597 {
4598 gimple_stmt_iterator psi;
4599 gimple phi;
4600 edge re, ene;
4601 edge_var_map_vector *head;
4602 edge_var_map *vm;
4603 size_t i;
4604
4605 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4606 remove arguments of the phi nodes in fin_bb. We need to create
4607 appropriate phi nodes in iter_part_bb instead. */
4608 se = single_pred_edge (fin_bb);
4609 re = single_succ_edge (trip_update_bb);
4610 head = redirect_edge_var_map_vector (re);
4611 ene = single_succ_edge (entry_bb);
4612
4613 psi = gsi_start_phis (fin_bb);
4614 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
4615 gsi_next (&psi), ++i)
4616 {
4617 gimple nphi;
4618 source_location locus;
4619
4620 phi = gsi_stmt (psi);
4621 t = gimple_phi_result (phi);
4622 gcc_assert (t == redirect_edge_var_map_result (vm));
4623 nphi = create_phi_node (t, iter_part_bb);
4624
4625 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4626 locus = gimple_phi_arg_location_from_edge (phi, se);
4627
4628 /* A special case -- fd->loop.v is not yet computed in
4629 iter_part_bb; we need to use v_extra instead. */
4630 if (t == fd->loop.v)
4631 t = v_extra;
4632 add_phi_arg (nphi, t, ene, locus);
4633 locus = redirect_edge_var_map_location (vm);
4634 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4635 }
4636 gcc_assert (!gsi_end_p (psi) && i == head->length ());
4637 redirect_edge_var_map_clear (re);
4638 while (1)
4639 {
4640 psi = gsi_start_phis (fin_bb);
4641 if (gsi_end_p (psi))
4642 break;
4643 remove_phi_node (&psi, false);
4644 }
4645
4646 /* Make phi node for trip. */
4647 phi = create_phi_node (trip_main, iter_part_bb);
4648 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4649 UNKNOWN_LOCATION);
4650 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4651 UNKNOWN_LOCATION);
4652 }
4653
4654 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4655 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4656 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4657 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4658 recompute_dominator (CDI_DOMINATORS, fin_bb));
4659 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4660 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4661 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4662 recompute_dominator (CDI_DOMINATORS, body_bb));
4663 }
4664
4665
4666 /* Expand the OpenMP loop defined by REGION. */
4667
4668 static void
4669 expand_omp_for (struct omp_region *region)
4670 {
4671 struct omp_for_data fd;
4672 struct omp_for_data_loop *loops;
4673
4674 loops
4675 = (struct omp_for_data_loop *)
4676 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4677 * sizeof (struct omp_for_data_loop));
4678 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4679 region->sched_kind = fd.sched_kind;
4680
4681 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4682 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4683 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4684 if (region->cont)
4685 {
4686 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4687 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4688 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4689 }
4690
4691 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4692 && !fd.have_ordered
4693 && fd.collapse == 1
4694 && region->cont != NULL)
4695 {
4696 if (fd.chunk_size == NULL)
4697 expand_omp_for_static_nochunk (region, &fd);
4698 else
4699 expand_omp_for_static_chunk (region, &fd);
4700 }
4701 else
4702 {
4703 int fn_index, start_ix, next_ix;
4704
4705 if (fd.chunk_size == NULL
4706 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
4707 fd.chunk_size = integer_zero_node;
4708 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
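      /* Map the schedule kind onto an index into the GOMP_loop_* builtins:
	 static/dynamic/guided keep their enum values 0/1/2, runtime is
	 remapped to 3 (auto was rejected by the assert above), and
	 have_ordered adds 4 to select the ordered variants.  This relies on
	 the builtins being declared in that order starting from
	 BUILT_IN_GOMP_LOOP_STATIC_START.  */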
4709 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4710 ? 3 : fd.sched_kind;
4711 fn_index += fd.have_ordered * 4;
4712 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
4713 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
4714 if (fd.iter_type == long_long_unsigned_type_node)
4715 {
4716 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4717 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
4718 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4719 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
4720 }
4721 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4722 (enum built_in_function) next_ix);
4723 }
4724
4725 if (gimple_in_ssa_p (cfun))
4726 update_ssa (TODO_update_ssa_only_virtuals);
4727 }
4728
4729
4730 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4731
4732 v = GOMP_sections_start (n);
4733 L0:
4734 switch (v)
4735 {
4736 case 0:
4737 goto L2;
4738 case 1:
4739 section 1;
4740 goto L1;
4741 case 2:
4742 ...
4743 case n:
4744 ...
4745 default:
4746 abort ();
4747 }
4748 L1:
4749 v = GOMP_sections_next ();
4750 goto L0;
4751 L2:
4752 reduction;
4753
4754 If this is a combined parallel sections construct, replace the call to
4755 GOMP_sections_start with a call to GOMP_sections_next. */
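/* As an illustrative example (function names invented here), for

     #pragma omp sections
     {
       #pragma omp section
	 foo ();
       #pragma omp section
	 bar ();
     }

   each thread repeatedly obtains a section number v from the runtime and
   dispatches on it through the switch above: v == 1 runs foo, v == 2 runs
   bar, and v == 0 means no sections are left.  */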
4756
4757 static void
4758 expand_omp_sections (struct omp_region *region)
4759 {
4760 tree t, u, vin = NULL, vmain, vnext, l2;
4761 vec<tree> label_vec;
4762 unsigned len;
4763 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4764 gimple_stmt_iterator si, switch_si;
4765 gimple sections_stmt, stmt, cont;
4766 edge_iterator ei;
4767 edge e;
4768 struct omp_region *inner;
4769 unsigned i, casei;
4770 bool exit_reachable = region->cont != NULL;
4771
4772 gcc_assert (region->exit != NULL);
4773 entry_bb = region->entry;
4774 l0_bb = single_succ (entry_bb);
4775 l1_bb = region->cont;
4776 l2_bb = region->exit;
4777 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4778 l2 = gimple_block_label (l2_bb);
4779 else
4780 {
4781 /* This can happen if there are reductions. */
4782 len = EDGE_COUNT (l0_bb->succs);
4783 gcc_assert (len > 0);
4784 e = EDGE_SUCC (l0_bb, len - 1);
4785 si = gsi_last_bb (e->dest);
4786 l2 = NULL_TREE;
4787 if (gsi_end_p (si)
4788 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4789 l2 = gimple_block_label (e->dest);
4790 else
4791 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4792 {
4793 si = gsi_last_bb (e->dest);
4794 if (gsi_end_p (si)
4795 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4796 {
4797 l2 = gimple_block_label (e->dest);
4798 break;
4799 }
4800 }
4801 }
4802 if (exit_reachable)
4803 default_bb = create_empty_bb (l1_bb->prev_bb);
4804 else
4805 default_bb = create_empty_bb (l0_bb);
4806
4807 /* We will build a switch() with enough cases for all the
4808 GIMPLE_OMP_SECTION regions, a '0' case to handle running out of work,
4809 and a default case to abort if something goes wrong. */
4810 len = EDGE_COUNT (l0_bb->succs);
4811
4812 /* Use vec::quick_push on label_vec throughout, since we know the size
4813 in advance. */
4814 label_vec.create (len);
4815
4816 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4817 GIMPLE_OMP_SECTIONS statement. */
4818 si = gsi_last_bb (entry_bb);
4819 sections_stmt = gsi_stmt (si);
4820 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4821 vin = gimple_omp_sections_control (sections_stmt);
4822 if (!is_combined_parallel (region))
4823 {
4824 /* If we are not inside a combined parallel+sections region,
4825 call GOMP_sections_start. */
4826 t = build_int_cst (unsigned_type_node,
4827 exit_reachable ? len - 1 : len);
4828 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
4829 stmt = gimple_build_call (u, 1, t);
4830 }
4831 else
4832 {
4833 /* Otherwise, call GOMP_sections_next. */
4834 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4835 stmt = gimple_build_call (u, 0);
4836 }
4837 gimple_call_set_lhs (stmt, vin);
4838 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4839 gsi_remove (&si, true);
4840
4841 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4842 L0_BB. */
4843 switch_si = gsi_last_bb (l0_bb);
4844 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4845 if (exit_reachable)
4846 {
4847 cont = last_stmt (l1_bb);
4848 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4849 vmain = gimple_omp_continue_control_use (cont);
4850 vnext = gimple_omp_continue_control_def (cont);
4851 }
4852 else
4853 {
4854 vmain = vin;
4855 vnext = NULL_TREE;
4856 }
4857
4858 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
4859 label_vec.quick_push (t);
4860 i = 1;
4861
4862 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4863 for (inner = region->inner, casei = 1;
4864 inner;
4865 inner = inner->next, i++, casei++)
4866 {
4867 basic_block s_entry_bb, s_exit_bb;
4868
4869 /* Skip optional reduction region. */
4870 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4871 {
4872 --i;
4873 --casei;
4874 continue;
4875 }
4876
4877 s_entry_bb = inner->entry;
4878 s_exit_bb = inner->exit;
4879
4880 t = gimple_block_label (s_entry_bb);
4881 u = build_int_cst (unsigned_type_node, casei);
4882 u = build_case_label (u, NULL, t);
4883 label_vec.quick_push (u);
4884
4885 si = gsi_last_bb (s_entry_bb);
4886 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4887 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4888 gsi_remove (&si, true);
4889 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4890
4891 if (s_exit_bb == NULL)
4892 continue;
4893
4894 si = gsi_last_bb (s_exit_bb);
4895 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4896 gsi_remove (&si, true);
4897
4898 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4899 }
4900
4901 /* Error handling code goes in DEFAULT_BB. */
4902 t = gimple_block_label (default_bb);
4903 u = build_case_label (NULL, NULL, t);
4904 make_edge (l0_bb, default_bb, 0);
4905
4906 stmt = gimple_build_switch (vmain, u, label_vec);
4907 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4908 gsi_remove (&switch_si, true);
4909 label_vec.release ();
4910
4911 si = gsi_start_bb (default_bb);
4912 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
4913 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4914
4915 if (exit_reachable)
4916 {
4917 tree bfn_decl;
4918
4919 /* Code to get the next section goes in L1_BB. */
4920 si = gsi_last_bb (l1_bb);
4921 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4922
4923 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4924 stmt = gimple_build_call (bfn_decl, 0);
4925 gimple_call_set_lhs (stmt, vnext);
4926 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4927 gsi_remove (&si, true);
4928
4929 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4930 }
4931
4932 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4933 si = gsi_last_bb (l2_bb);
4934 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4935 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
4936 else
4937 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
4938 stmt = gimple_build_call (t, 0);
4939 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4940 gsi_remove (&si, true);
4941
4942 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4943 }
4944
4945
4946 /* Expand code for an OpenMP single directive. We've already expanded
4947 much of the code; here we simply place the GOMP_barrier call. */
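/* For instance (illustrative source only), in

     #pragma omp single copyprivate (x)
       x = compute ();

   the copyprivate clause makes need_barrier true below, so the trailing
   GOMP_barrier is emitted and the thread that executed the single cannot
   leave before the other threads have copied x out.  */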
4948
4949 static void
4950 expand_omp_single (struct omp_region *region)
4951 {
4952 basic_block entry_bb, exit_bb;
4953 gimple_stmt_iterator si;
4954 bool need_barrier = false;
4955
4956 entry_bb = region->entry;
4957 exit_bb = region->exit;
4958
4959 si = gsi_last_bb (entry_bb);
4960 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4961 be removed. We need to ensure that the thread that entered the single
4962 does not exit before the data is copied out by the other threads. */
4963 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4964 OMP_CLAUSE_COPYPRIVATE))
4965 need_barrier = true;
4966 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4967 gsi_remove (&si, true);
4968 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4969
4970 si = gsi_last_bb (exit_bb);
4971 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4972 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4973 false, GSI_SAME_STMT);
4974 gsi_remove (&si, true);
4975 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4976 }
4977
4978
4979 /* Generic expansion for OpenMP synchronization directives: master,
4980 ordered and critical. All we need to do here is remove the entry
4981 and exit markers for REGION. */
4982
4983 static void
4984 expand_omp_synch (struct omp_region *region)
4985 {
4986 basic_block entry_bb, exit_bb;
4987 gimple_stmt_iterator si;
4988
4989 entry_bb = region->entry;
4990 exit_bb = region->exit;
4991
4992 si = gsi_last_bb (entry_bb);
4993 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4994 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4995 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4996 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4997 gsi_remove (&si, true);
4998 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4999
5000 if (exit_bb)
5001 {
5002 si = gsi_last_bb (exit_bb);
5003 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
5004 gsi_remove (&si, true);
5005 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
5006 }
5007 }
5008
5009 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5010 operation as a normal volatile load. */
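/* Sketch of the mapping this function aims for, assuming the target
   provides the sized __atomic_load_N builtin: a source-level

     #pragma omp atomic read
       v = x;

   conceptually becomes

     v = __atomic_load_n (&x, __ATOMIC_RELAXED);  */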
5011
5012 static bool
5013 expand_omp_atomic_load (basic_block load_bb, tree addr,
5014 tree loaded_val, int index)
5015 {
5016 enum built_in_function tmpbase;
5017 gimple_stmt_iterator gsi;
5018 basic_block store_bb;
5019 location_t loc;
5020 gimple stmt;
5021 tree decl, call, type, itype;
5022
5023 gsi = gsi_last_bb (load_bb);
5024 stmt = gsi_stmt (gsi);
5025 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
5026 loc = gimple_location (stmt);
5027
5028 /* ??? If the target does not implement atomic_load_optab[mode], and mode
5029 is smaller than word size, then expand_atomic_load assumes that the load
5030 is atomic. We could avoid the builtin entirely in this case. */
5031
5032 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
5033 decl = builtin_decl_explicit (tmpbase);
5034 if (decl == NULL_TREE)
5035 return false;
5036
5037 type = TREE_TYPE (loaded_val);
5038 itype = TREE_TYPE (TREE_TYPE (decl));
5039
5040 call = build_call_expr_loc (loc, decl, 2, addr,
5041 build_int_cst (NULL, MEMMODEL_RELAXED));
5042 if (!useless_type_conversion_p (type, itype))
5043 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5044 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5045
5046 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5047 gsi_remove (&gsi, true);
5048
5049 store_bb = single_succ (load_bb);
5050 gsi = gsi_last_bb (store_bb);
5051 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5052 gsi_remove (&gsi, true);
5053
5054 if (gimple_in_ssa_p (cfun))
5055 update_ssa (TODO_update_ssa_no_phi);
5056
5057 return true;
5058 }
5059
5060 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5061 operation as a normal volatile store. */
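/* Sketch of the mapping this function aims for, assuming the sized
   __atomic_store_N (or __atomic_exchange_N) builtin is available: a
   source-level

     #pragma omp atomic write
       x = expr;

   conceptually becomes

     __atomic_store_n (&x, expr, __ATOMIC_RELAXED);

   and the exchange form is used instead when the old value is needed.  */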
5062
5063 static bool
5064 expand_omp_atomic_store (basic_block load_bb, tree addr,
5065 tree loaded_val, tree stored_val, int index)
5066 {
5067 enum built_in_function tmpbase;
5068 gimple_stmt_iterator gsi;
5069 basic_block store_bb = single_succ (load_bb);
5070 location_t loc;
5071 gimple stmt;
5072 tree decl, call, type, itype;
5073 enum machine_mode imode;
5074 bool exchange;
5075
5076 gsi = gsi_last_bb (load_bb);
5077 stmt = gsi_stmt (gsi);
5078 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
5079
5080 /* If the load value is needed, then this isn't a store but an exchange. */
5081 exchange = gimple_omp_atomic_need_value_p (stmt);
5082
5083 gsi = gsi_last_bb (store_bb);
5084 stmt = gsi_stmt (gsi);
5085 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
5086 loc = gimple_location (stmt);
5087
5088 /* ??? If the target does not implement atomic_store_optab[mode], and mode
5089 is smaller than word size, then expand_atomic_store assumes that the store
5090 is atomic. We could avoid the builtin entirely in this case. */
5091
5092 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
5093 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
5094 decl = builtin_decl_explicit (tmpbase);
5095 if (decl == NULL_TREE)
5096 return false;
5097
5098 type = TREE_TYPE (stored_val);
5099
5100 /* Dig out the type of the function's second argument. */
5101 itype = TREE_TYPE (decl);
5102 itype = TYPE_ARG_TYPES (itype);
5103 itype = TREE_CHAIN (itype);
5104 itype = TREE_VALUE (itype);
5105 imode = TYPE_MODE (itype);
5106
5107 if (exchange && !can_atomic_exchange_p (imode, true))
5108 return false;
5109
5110 if (!useless_type_conversion_p (itype, type))
5111 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
5112 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
5113 build_int_cst (NULL, MEMMODEL_RELAXED));
5114 if (exchange)
5115 {
5116 if (!useless_type_conversion_p (type, itype))
5117 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5118 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5119 }
5120
5121 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5122 gsi_remove (&gsi, true);
5123
5124 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
5125 gsi = gsi_last_bb (load_bb);
5126 gsi_remove (&gsi, true);
5127
5128 if (gimple_in_ssa_p (cfun))
5129 update_ssa (TODO_update_ssa_no_phi);
5130
5131 return true;
5132 }
5133
5134 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5135 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
5136 size of the data type, and thus usable to find the index of the builtin
5137 decl. Returns false if the expression is not of the proper form. */
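/* Sketch of the mapping this function aims for, assuming the target
   provides the sized __atomic_fetch_add_N builtin: a source-level

     #pragma omp atomic
       x += 1;

   conceptually becomes

     __atomic_fetch_add (&x, 1, __ATOMIC_RELAXED);

   with __atomic_add_fetch used instead when the updated value is
   needed.  */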
5138
5139 static bool
5140 expand_omp_atomic_fetch_op (basic_block load_bb,
5141 tree addr, tree loaded_val,
5142 tree stored_val, int index)
5143 {
5144 enum built_in_function oldbase, newbase, tmpbase;
5145 tree decl, itype, call;
5146 tree lhs, rhs;
5147 basic_block store_bb = single_succ (load_bb);
5148 gimple_stmt_iterator gsi;
5149 gimple stmt;
5150 location_t loc;
5151 enum tree_code code;
5152 bool need_old, need_new;
5153 enum machine_mode imode;
5154
5155 /* We expect to find the following sequences:
5156
5157 load_bb:
5158 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
5159
5160 store_bb:
5161 val = tmp OP something; (or: something OP tmp)
5162 GIMPLE_OMP_STORE (val)
5163
5164 ???FIXME: Allow a more flexible sequence.
5165 Perhaps use data flow to pick the statements.
5166
5167 */
5168
5169 gsi = gsi_after_labels (store_bb);
5170 stmt = gsi_stmt (gsi);
5171 loc = gimple_location (stmt);
5172 if (!is_gimple_assign (stmt))
5173 return false;
5174 gsi_next (&gsi);
5175 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
5176 return false;
5177 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
5178 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
5179 gcc_checking_assert (!need_old || !need_new);
5180
5181 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
5182 return false;
5183
5184 /* Check for one of the supported fetch-op operations. */
5185 code = gimple_assign_rhs_code (stmt);
5186 switch (code)
5187 {
5188 case PLUS_EXPR:
5189 case POINTER_PLUS_EXPR:
5190 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
5191 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
5192 break;
5193 case MINUS_EXPR:
5194 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
5195 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
5196 break;
5197 case BIT_AND_EXPR:
5198 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
5199 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
5200 break;
5201 case BIT_IOR_EXPR:
5202 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
5203 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
5204 break;
5205 case BIT_XOR_EXPR:
5206 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
5207 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
5208 break;
5209 default:
5210 return false;
5211 }
5212
5213 /* Make sure the expression is of the proper form. */
5214 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5215 rhs = gimple_assign_rhs2 (stmt);
5216 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5217 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5218 rhs = gimple_assign_rhs1 (stmt);
5219 else
5220 return false;
5221
5222 tmpbase = ((enum built_in_function)
5223 ((need_new ? newbase : oldbase) + index + 1));
5224 decl = builtin_decl_explicit (tmpbase);
5225 if (decl == NULL_TREE)
5226 return false;
5227 itype = TREE_TYPE (TREE_TYPE (decl));
5228 imode = TYPE_MODE (itype);
5229
5230 /* We could test all of the various optabs involved, but the fact of the
5231 matter is that (with the exception of i486 vs i586 and xadd) all targets
5232 that support any atomic operation optab also implement compare-and-swap.
5233 Let optabs.c take care of expanding any compare-and-swap loop. */
5234 if (!can_compare_and_swap_p (imode, true))
5235 return false;
5236
5237 gsi = gsi_last_bb (load_bb);
5238 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5239
5240 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
5241 It only requires that the operation happen atomically. Thus we can
5242 use the RELAXED memory model. */
5243 call = build_call_expr_loc (loc, decl, 3, addr,
5244 fold_convert_loc (loc, itype, rhs),
5245 build_int_cst (NULL, MEMMODEL_RELAXED));
5246
5247 if (need_old || need_new)
5248 {
5249 lhs = need_old ? loaded_val : stored_val;
5250 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
5251 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
5252 }
5253 else
5254 call = fold_convert_loc (loc, void_type_node, call);
5255 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5256 gsi_remove (&gsi, true);
5257
5258 gsi = gsi_last_bb (store_bb);
5259 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5260 gsi_remove (&gsi, true);
5261 gsi = gsi_last_bb (store_bb);
5262 gsi_remove (&gsi, true);
5263
5264 if (gimple_in_ssa_p (cfun))
5265 update_ssa (TODO_update_ssa_no_phi);
5266
5267 return true;
5268 }
5269
5270 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5271
5272 oldval = *addr;
5273 repeat:
5274 newval = rhs; // with oldval replacing *addr in rhs
5275 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5276 if (oldval != newval)
5277 goto repeat;
5278
5279 INDEX is log2 of the size of the data type, and thus usable to find the
5280 index of the builtin decl. */
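/* For example (illustrative source only), an update with no matching
   fetch-op builtin, such as

     #pragma omp atomic
       x *= 2.0;

   ends up in this compare-and-swap loop; and because x is a float here,
   the code below additionally view-converts the value to a same-sized
   integer so that both the comparison and the CAS operate on the integer
   bits.  */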
5281
5282 static bool
5283 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5284 tree addr, tree loaded_val, tree stored_val,
5285 int index)
5286 {
5287 tree loadedi, storedi, initial, new_storedi, old_vali;
5288 tree type, itype, cmpxchg, iaddr;
5289 gimple_stmt_iterator si;
5290 basic_block loop_header = single_succ (load_bb);
5291 gimple phi, stmt;
5292 edge e;
5293 enum built_in_function fncode;
5294
5295 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
5296 order to use the RELAXED memory model effectively. */
5297 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
5298 + index + 1);
5299 cmpxchg = builtin_decl_explicit (fncode);
5300 if (cmpxchg == NULL_TREE)
5301 return false;
5302 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5303 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5304
5305 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
5306 return false;
5307
5308 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5309 si = gsi_last_bb (load_bb);
5310 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5311
5312 /* For floating-point values, we'll need to view-convert them to integers
5313 so that we can perform the atomic compare and swap. Simplify the
5314 following code by always setting up the "i"ntegral variables. */
5315 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5316 {
5317 tree iaddr_val;
5318
5319 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
5320 true), NULL);
5321 iaddr_val
5322 = force_gimple_operand_gsi (&si,
5323 fold_convert (TREE_TYPE (iaddr), addr),
5324 false, NULL_TREE, true, GSI_SAME_STMT);
5325 stmt = gimple_build_assign (iaddr, iaddr_val);
5326 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5327 loadedi = create_tmp_var (itype, NULL);
5328 if (gimple_in_ssa_p (cfun))
5329 loadedi = make_ssa_name (loadedi, NULL);
5330 }
5331 else
5332 {
5333 iaddr = addr;
5334 loadedi = loaded_val;
5335 }
5336
5337 initial
5338 = force_gimple_operand_gsi (&si,
5339 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5340 iaddr,
5341 build_int_cst (TREE_TYPE (iaddr), 0)),
5342 true, NULL_TREE, true, GSI_SAME_STMT);
5343
5344 /* Move the value to the LOADEDI temporary. */
5345 if (gimple_in_ssa_p (cfun))
5346 {
5347 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5348 phi = create_phi_node (loadedi, loop_header);
5349 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5350 initial);
5351 }
5352 else
5353 gsi_insert_before (&si,
5354 gimple_build_assign (loadedi, initial),
5355 GSI_SAME_STMT);
5356 if (loadedi != loaded_val)
5357 {
5358 gimple_stmt_iterator gsi2;
5359 tree x;
5360
5361 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5362 gsi2 = gsi_start_bb (loop_header);
5363 if (gimple_in_ssa_p (cfun))
5364 {
5365 gimple stmt;
5366 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5367 true, GSI_SAME_STMT);
5368 stmt = gimple_build_assign (loaded_val, x);
5369 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5370 }
5371 else
5372 {
5373 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5374 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5375 true, GSI_SAME_STMT);
5376 }
5377 }
5378 gsi_remove (&si, true);
5379
5380 si = gsi_last_bb (store_bb);
5381 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5382
5383 if (iaddr == addr)
5384 storedi = stored_val;
5385 else
5386 storedi =
5387 force_gimple_operand_gsi (&si,
5388 build1 (VIEW_CONVERT_EXPR, itype,
5389 stored_val), true, NULL_TREE, true,
5390 GSI_SAME_STMT);
5391
5392 /* Build the compare&swap statement. */
5393 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5394 new_storedi = force_gimple_operand_gsi (&si,
5395 fold_convert (TREE_TYPE (loadedi),
5396 new_storedi),
5397 true, NULL_TREE,
5398 true, GSI_SAME_STMT);
5399
5400 if (gimple_in_ssa_p (cfun))
5401 old_vali = loadedi;
5402 else
5403 {
5404 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5405 stmt = gimple_build_assign (old_vali, loadedi);
5406 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5407
5408 stmt = gimple_build_assign (loadedi, new_storedi);
5409 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5410 }
5411
5412 /* Note that we always perform the comparison as an integer, even for
5413 floating point. This allows the atomic operation to properly
5414 succeed even with NaNs and -0.0. */
5415 stmt = gimple_build_cond_empty
5416 (build2 (NE_EXPR, boolean_type_node,
5417 new_storedi, old_vali));
5418 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5419
5420 /* Update cfg. */
5421 e = single_succ_edge (store_bb);
5422 e->flags &= ~EDGE_FALLTHRU;
5423 e->flags |= EDGE_FALSE_VALUE;
5424
5425 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5426
5427 /* Copy the new value to loadedi (we already did that before the condition
5428 if we are not in SSA). */
5429 if (gimple_in_ssa_p (cfun))
5430 {
5431 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5432 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5433 }
5434
5435 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5436 gsi_remove (&si, true);
5437
5438 if (gimple_in_ssa_p (cfun))
5439 update_ssa (TODO_update_ssa_no_phi);
5440
5441 return true;
5442 }
5443
5444 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5445
5446 GOMP_atomic_start ();
5447 *addr = rhs;
5448 GOMP_atomic_end ();
5449
5450 The result is not globally atomic, but works so long as all parallel
5451 references are within #pragma omp atomic directives. According to
5452      responses received from omp@openmp.org, this appears to be within spec,
5453      which makes sense, since that's how several other compilers handle
5454      this situation as well.
5455 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5456 expanding. STORED_VAL is the operand of the matching
5457 GIMPLE_OMP_ATOMIC_STORE.
5458
5459 We replace
5460 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5461 loaded_val = *addr;
5462
5463 and replace
5464 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5465 *addr = stored_val;
5466 */
5467
5468 static bool
5469 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5470 tree addr, tree loaded_val, tree stored_val)
5471 {
5472 gimple_stmt_iterator si;
5473 gimple stmt;
5474 tree t;
5475
5476 si = gsi_last_bb (load_bb);
5477 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5478
5479 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
5480 t = build_call_expr (t, 0);
5481 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5482
5483 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5484 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5485 gsi_remove (&si, true);
5486
5487 si = gsi_last_bb (store_bb);
5488 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5489
5490 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5491 stored_val);
5492 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5493
5494 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
5495 t = build_call_expr (t, 0);
5496 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5497 gsi_remove (&si, true);
5498
5499 if (gimple_in_ssa_p (cfun))
5500 update_ssa (TODO_update_ssa_no_phi);
5501 return true;
5502 }
5503
5504 /* Expand a GIMPLE_OMP_ATOMIC statement.  We try to expand
5505    using expand_omp_atomic_fetch_op.  If that fails, we try to
5506    call expand_omp_atomic_pipeline, and if that fails too, the
5507 ultimate fallback is wrapping the operation in a mutex
5508 (expand_omp_atomic_mutex). REGION is the atomic region built
5509 by build_omp_regions_1(). */
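
/* Illustrative sketch (added for exposition; the exact builtins chosen
   depend on the target): for "#pragma omp atomic  x += 1;" the three
   strategies would produce roughly

     int x:     (void) __sync_fetch_and_add (&x, 1);
     double x:  a __sync_val_compare_and_swap loop over the bits of x;
     16-byte x on a target without a 16-byte compare-and-swap:
                GOMP_atomic_start (); x = x + 1; GOMP_atomic_end ();  */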
5510
5511 static void
5512 expand_omp_atomic (struct omp_region *region)
5513 {
5514 basic_block load_bb = region->entry, store_bb = region->exit;
5515 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5516 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5517 tree addr = gimple_omp_atomic_load_rhs (load);
5518 tree stored_val = gimple_omp_atomic_store_val (store);
5519 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5520 HOST_WIDE_INT index;
5521
5522 /* Make sure the type is one of the supported sizes. */
5523 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5524 index = exact_log2 (index);
5525 if (index >= 0 && index <= 4)
5526 {
5527 unsigned int align = TYPE_ALIGN_UNIT (type);
5528
5529 /* __sync builtins require strict data alignment. */
5530 if (exact_log2 (align) >= index)
5531 {
5532 /* Atomic load. */
5533 if (loaded_val == stored_val
5534 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5535 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5536 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5537 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
5538 return;
5539
5540 /* Atomic store. */
5541 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5542 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5543 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5544 && store_bb == single_succ (load_bb)
5545 && first_stmt (store_bb) == store
5546 && expand_omp_atomic_store (load_bb, addr, loaded_val,
5547 stored_val, index))
5548 return;
5549
5550 /* When possible, use specialized atomic update functions. */
5551 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5552 && store_bb == single_succ (load_bb)
5553 && expand_omp_atomic_fetch_op (load_bb, addr,
5554 loaded_val, stored_val, index))
5555 return;
5556
5557 	  /* If we don't have specialized __sync builtins, try to implement
5558 as a compare and swap loop. */
5559 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5560 loaded_val, stored_val, index))
5561 return;
5562 }
5563 }
5564
5565 /* The ultimate fallback is wrapping the operation in a mutex. */
5566 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5567 }
5568
5569
5570 /* Expand the parallel region tree rooted at REGION. Expansion
5571 proceeds in depth-first order. Innermost regions are expanded
5572 first. This way, parallel regions that require a new function to
5573 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5574 internal dependencies in their body. */
5575
5576 static void
5577 expand_omp (struct omp_region *region)
5578 {
5579 while (region)
5580 {
5581 location_t saved_location;
5582
5583 /* First, determine whether this is a combined parallel+workshare
5584 region. */
5585 if (region->type == GIMPLE_OMP_PARALLEL)
5586 determine_parallel_type (region);
5587
5588 if (region->inner)
5589 expand_omp (region->inner);
5590
5591 saved_location = input_location;
5592 if (gimple_has_location (last_stmt (region->entry)))
5593 input_location = gimple_location (last_stmt (region->entry));
5594
5595 switch (region->type)
5596 {
5597 case GIMPLE_OMP_PARALLEL:
5598 case GIMPLE_OMP_TASK:
5599 expand_omp_taskreg (region);
5600 break;
5601
5602 case GIMPLE_OMP_FOR:
5603 expand_omp_for (region);
5604 break;
5605
5606 case GIMPLE_OMP_SECTIONS:
5607 expand_omp_sections (region);
5608 break;
5609
5610 case GIMPLE_OMP_SECTION:
5611 /* Individual omp sections are handled together with their
5612 parent GIMPLE_OMP_SECTIONS region. */
5613 break;
5614
5615 case GIMPLE_OMP_SINGLE:
5616 expand_omp_single (region);
5617 break;
5618
5619 case GIMPLE_OMP_MASTER:
5620 case GIMPLE_OMP_ORDERED:
5621 case GIMPLE_OMP_CRITICAL:
5622 expand_omp_synch (region);
5623 break;
5624
5625 case GIMPLE_OMP_ATOMIC_LOAD:
5626 expand_omp_atomic (region);
5627 break;
5628
5629 default:
5630 gcc_unreachable ();
5631 }
5632
5633 input_location = saved_location;
5634 region = region->next;
5635 }
5636 }
5637
5638
5639 /* Helper for build_omp_regions. Scan the dominator tree starting at
5640 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5641    true, the function ends once a single tree is built (otherwise, a whole
5642    forest of OMP constructs may be built).  */
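
/* For example (illustrative only), a function containing

     #pragma omp parallel
       {
         #pragma omp for
         for (...) ...
       }

   yields a GIMPLE_OMP_PARALLEL region whose ->inner is a GIMPLE_OMP_FOR
   region; each region's ->exit is the block holding the matching
   GIMPLE_OMP_RETURN.  */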
5643
5644 static void
5645 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5646 bool single_tree)
5647 {
5648 gimple_stmt_iterator gsi;
5649 gimple stmt;
5650 basic_block son;
5651
5652 gsi = gsi_last_bb (bb);
5653 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5654 {
5655 struct omp_region *region;
5656 enum gimple_code code;
5657
5658 stmt = gsi_stmt (gsi);
5659 code = gimple_code (stmt);
5660 if (code == GIMPLE_OMP_RETURN)
5661 {
5662 /* STMT is the return point out of region PARENT. Mark it
5663 as the exit point and make PARENT the immediately
5664 enclosing region. */
5665 gcc_assert (parent);
5666 region = parent;
5667 region->exit = bb;
5668 parent = parent->outer;
5669 }
5670 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5671 {
5672 	  /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5673 GIMPLE_OMP_RETURN, but matches with
5674 GIMPLE_OMP_ATOMIC_LOAD. */
5675 gcc_assert (parent);
5676 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5677 region = parent;
5678 region->exit = bb;
5679 parent = parent->outer;
5680 }
5681
5682 else if (code == GIMPLE_OMP_CONTINUE)
5683 {
5684 gcc_assert (parent);
5685 parent->cont = bb;
5686 }
5687 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5688 {
5689 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5690 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5691 ;
5692 }
5693 else
5694 {
5695 /* Otherwise, this directive becomes the parent for a new
5696 region. */
5697 region = new_omp_region (bb, code, parent);
5698 parent = region;
5699 }
5700 }
5701
5702 if (single_tree && !parent)
5703 return;
5704
5705 for (son = first_dom_son (CDI_DOMINATORS, bb);
5706 son;
5707 son = next_dom_son (CDI_DOMINATORS, son))
5708 build_omp_regions_1 (son, parent, single_tree);
5709 }
5710
5711 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5712 root_omp_region. */
5713
5714 static void
5715 build_omp_regions_root (basic_block root)
5716 {
5717 gcc_assert (root_omp_region == NULL);
5718 build_omp_regions_1 (root, NULL, true);
5719 gcc_assert (root_omp_region != NULL);
5720 }
5721
5722 /* Expand the OMP construct (and its subconstructs) starting in HEAD.  */
5723
5724 void
5725 omp_expand_local (basic_block head)
5726 {
5727 build_omp_regions_root (head);
5728 if (dump_file && (dump_flags & TDF_DETAILS))
5729 {
5730 fprintf (dump_file, "\nOMP region tree\n\n");
5731 dump_omp_region (dump_file, root_omp_region, 0);
5732 fprintf (dump_file, "\n");
5733 }
5734
5735 remove_exit_barriers (root_omp_region);
5736 expand_omp (root_omp_region);
5737
5738 free_omp_regions ();
5739 }
5740
5741 /* Scan the CFG and build a tree of OMP regions, storing the root of
5742    the OMP region tree in root_omp_region.  */
5743
5744 static void
5745 build_omp_regions (void)
5746 {
5747 gcc_assert (root_omp_region == NULL);
5748 calculate_dominance_info (CDI_DOMINATORS);
5749 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5750 }
5751
5752 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5753
5754 static unsigned int
5755 execute_expand_omp (void)
5756 {
5757 build_omp_regions ();
5758
5759 if (!root_omp_region)
5760 return 0;
5761
5762 if (dump_file)
5763 {
5764 fprintf (dump_file, "\nOMP region tree\n\n");
5765 dump_omp_region (dump_file, root_omp_region, 0);
5766 fprintf (dump_file, "\n");
5767 }
5768
5769 remove_exit_barriers (root_omp_region);
5770
5771 expand_omp (root_omp_region);
5772
5773 cleanup_tree_cfg ();
5774
5775 free_omp_regions ();
5776
5777 return 0;
5778 }
5779
5780 /* OMP expansion -- the default pass, run before creation of SSA form. */
5781
5782 static bool
5783 gate_expand_omp (void)
5784 {
5785 return (flag_openmp != 0 && !seen_error ());
5786 }
5787
5788 struct gimple_opt_pass pass_expand_omp =
5789 {
5790 {
5791 GIMPLE_PASS,
5792 "ompexp", /* name */
5793 OPTGROUP_NONE, /* optinfo_flags */
5794 gate_expand_omp, /* gate */
5795 execute_expand_omp, /* execute */
5796 NULL, /* sub */
5797 NULL, /* next */
5798 0, /* static_pass_number */
5799 TV_NONE, /* tv_id */
5800 PROP_gimple_any, /* properties_required */
5801 0, /* properties_provided */
5802 0, /* properties_destroyed */
5803 0, /* todo_flags_start */
5804 0 /* todo_flags_finish */
5805 }
5806 };
5807 \f
5808 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5809
5810 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5811 CTX is the enclosing OMP context for the current statement. */
5812
5813 static void
5814 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5815 {
5816 tree block, control;
5817 gimple_stmt_iterator tgsi;
5818 gimple stmt, new_stmt, bind, t;
5819 gimple_seq ilist, dlist, olist, new_body;
5820 struct gimplify_ctx gctx;
5821
5822 stmt = gsi_stmt (*gsi_p);
5823
5824 push_gimplify_context (&gctx);
5825
5826 dlist = NULL;
5827 ilist = NULL;
5828 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5829 &ilist, &dlist, ctx);
5830
5831 new_body = gimple_omp_body (stmt);
5832 gimple_omp_set_body (stmt, NULL);
5833 tgsi = gsi_start (new_body);
5834 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
5835 {
5836 omp_context *sctx;
5837 gimple sec_start;
5838
5839 sec_start = gsi_stmt (tgsi);
5840 sctx = maybe_lookup_ctx (sec_start);
5841 gcc_assert (sctx);
5842
5843 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
5844 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
5845 GSI_CONTINUE_LINKING);
5846 gimple_omp_set_body (sec_start, NULL);
5847
5848 if (gsi_one_before_end_p (tgsi))
5849 {
5850 gimple_seq l = NULL;
5851 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5852 &l, ctx);
5853 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
5854 gimple_omp_section_set_last (sec_start);
5855 }
5856
5857 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
5858 GSI_CONTINUE_LINKING);
5859 }
5860
5861 block = make_node (BLOCK);
5862 bind = gimple_build_bind (NULL, new_body, block);
5863
5864 olist = NULL;
5865 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5866
5867 block = make_node (BLOCK);
5868 new_stmt = gimple_build_bind (NULL, NULL, block);
5869 gsi_replace (gsi_p, new_stmt, true);
5870
5871 pop_gimplify_context (new_stmt);
5872 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5873 BLOCK_VARS (block) = gimple_bind_vars (bind);
5874 if (BLOCK_VARS (block))
5875 TREE_USED (block) = 1;
5876
5877 new_body = NULL;
5878 gimple_seq_add_seq (&new_body, ilist);
5879 gimple_seq_add_stmt (&new_body, stmt);
5880 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5881 gimple_seq_add_stmt (&new_body, bind);
5882
5883 control = create_tmp_var (unsigned_type_node, ".section");
5884 t = gimple_build_omp_continue (control, control);
5885 gimple_omp_sections_set_control (stmt, control);
5886 gimple_seq_add_stmt (&new_body, t);
5887
5888 gimple_seq_add_seq (&new_body, olist);
5889 gimple_seq_add_seq (&new_body, dlist);
5890
5891 new_body = maybe_catch_exception (new_body);
5892
5893 t = gimple_build_omp_return
5894 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5895 OMP_CLAUSE_NOWAIT));
5896 gimple_seq_add_stmt (&new_body, t);
5897
5898 gimple_bind_set_body (new_stmt, new_body);
5899 }
5900
5901
5902 /* A subroutine of lower_omp_single. Expand the simple form of
5903 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5904
5905 if (GOMP_single_start ())
5906 BODY;
5907 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5908
5909 FIXME. It may be better to delay expanding the logic of this until
5910 pass_expand_omp. The expanded logic may make the job more difficult
5911    for a synchronization analysis pass.  */
5912
5913 static void
5914 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5915 {
5916 location_t loc = gimple_location (single_stmt);
5917 tree tlabel = create_artificial_label (loc);
5918 tree flabel = create_artificial_label (loc);
5919 gimple call, cond;
5920 tree lhs, decl;
5921
5922 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
5923 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5924 call = gimple_build_call (decl, 0);
5925 gimple_call_set_lhs (call, lhs);
5926 gimple_seq_add_stmt (pre_p, call);
5927
5928 cond = gimple_build_cond (EQ_EXPR, lhs,
5929 fold_convert_loc (loc, TREE_TYPE (lhs),
5930 boolean_true_node),
5931 tlabel, flabel);
5932 gimple_seq_add_stmt (pre_p, cond);
5933 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5934 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5935 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5936 }
5937
5938
5939 /* A subroutine of lower_omp_single.  Expand a GIMPLE_OMP_SINGLE
5940    that has a copyprivate clause:
5941
5942 #pragma omp single copyprivate (a, b, c)
5943
5944 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5945
5946 {
5947 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5948 {
5949 BODY;
5950 copyout.a = a;
5951 copyout.b = b;
5952 copyout.c = c;
5953 GOMP_single_copy_end (&copyout);
5954 }
5955 else
5956 {
5957 a = copyout_p->a;
5958 b = copyout_p->b;
5959 c = copyout_p->c;
5960 }
5961 GOMP_barrier ();
5962 }
5963
5964 FIXME. It may be better to delay expanding the logic of this until
5965 pass_expand_omp. The expanded logic may make the job more difficult
5966    for a synchronization analysis pass.  */
5967
5968 static void
5969 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5970 {
5971 tree ptr_type, t, l0, l1, l2, bfn_decl;
5972 gimple_seq copyin_seq;
5973 location_t loc = gimple_location (single_stmt);
5974
5975 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5976
5977 ptr_type = build_pointer_type (ctx->record_type);
5978 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5979
5980 l0 = create_artificial_label (loc);
5981 l1 = create_artificial_label (loc);
5982 l2 = create_artificial_label (loc);
5983
5984 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
5985 t = build_call_expr_loc (loc, bfn_decl, 0);
5986 t = fold_convert_loc (loc, ptr_type, t);
5987 gimplify_assign (ctx->receiver_decl, t, pre_p);
5988
5989 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5990 build_int_cst (ptr_type, 0));
5991 t = build3 (COND_EXPR, void_type_node, t,
5992 build_and_jump (&l0), build_and_jump (&l1));
5993 gimplify_and_add (t, pre_p);
5994
5995 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5996
5997 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5998
5999 copyin_seq = NULL;
6000 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
6001 &copyin_seq, ctx);
6002
6003 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6004 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
6005 t = build_call_expr_loc (loc, bfn_decl, 1, t);
6006 gimplify_and_add (t, pre_p);
6007
6008 t = build_and_jump (&l2);
6009 gimplify_and_add (t, pre_p);
6010
6011 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
6012
6013 gimple_seq_add_seq (pre_p, copyin_seq);
6014
6015 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
6016 }
6017
6018
6019 /* Expand code for an OpenMP single directive. */
6020
6021 static void
6022 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6023 {
6024 tree block;
6025 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
6026 gimple_seq bind_body, dlist;
6027 struct gimplify_ctx gctx;
6028
6029 push_gimplify_context (&gctx);
6030
6031 block = make_node (BLOCK);
6032 bind = gimple_build_bind (NULL, NULL, block);
6033 gsi_replace (gsi_p, bind, true);
6034 bind_body = NULL;
6035 dlist = NULL;
6036 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
6037 &bind_body, &dlist, ctx);
6038 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
6039
6040 gimple_seq_add_stmt (&bind_body, single_stmt);
6041
6042 if (ctx->record_type)
6043 lower_omp_single_copy (single_stmt, &bind_body, ctx);
6044 else
6045 lower_omp_single_simple (single_stmt, &bind_body);
6046
6047 gimple_omp_set_body (single_stmt, NULL);
6048
6049 gimple_seq_add_seq (&bind_body, dlist);
6050
6051 bind_body = maybe_catch_exception (bind_body);
6052
6053 t = gimple_build_omp_return
6054 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
6055 OMP_CLAUSE_NOWAIT));
6056 gimple_seq_add_stmt (&bind_body, t);
6057 gimple_bind_set_body (bind, bind_body);
6058
6059 pop_gimplify_context (bind);
6060
6061 gimple_bind_append_vars (bind, ctx->block_vars);
6062 BLOCK_VARS (block) = ctx->block_vars;
6063 if (BLOCK_VARS (block))
6064 TREE_USED (block) = 1;
6065 }
6066
6067
6068 /* Expand code for an OpenMP master directive. */
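
/* Illustrative sketch of the lowering performed below (not a verbatim
   dump): the body is guarded by a thread-number check,

     if (omp_get_thread_num () != 0) goto lab;
     BODY;
   lab:

   followed by a GIMPLE_OMP_RETURN with the nowait flag set, since a
   master construct has no implied barrier.  */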
6069
6070 static void
6071 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6072 {
6073 tree block, lab = NULL, x, bfn_decl;
6074 gimple stmt = gsi_stmt (*gsi_p), bind;
6075 location_t loc = gimple_location (stmt);
6076 gimple_seq tseq;
6077 struct gimplify_ctx gctx;
6078
6079 push_gimplify_context (&gctx);
6080
6081 block = make_node (BLOCK);
6082 bind = gimple_build_bind (NULL, NULL, block);
6083 gsi_replace (gsi_p, bind, true);
6084 gimple_bind_add_stmt (bind, stmt);
6085
6086 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
6087 x = build_call_expr_loc (loc, bfn_decl, 0);
6088 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
6089 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
6090 tseq = NULL;
6091 gimplify_and_add (x, &tseq);
6092 gimple_bind_add_seq (bind, tseq);
6093
6094 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6095 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6096 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6097 gimple_omp_set_body (stmt, NULL);
6098
6099 gimple_bind_add_stmt (bind, gimple_build_label (lab));
6100
6101 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6102
6103 pop_gimplify_context (bind);
6104
6105 gimple_bind_append_vars (bind, ctx->block_vars);
6106 BLOCK_VARS (block) = ctx->block_vars;
6107 }
6108
6109
6110 /* Expand code for an OpenMP ordered directive. */
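
/* Illustrative sketch of the lowering performed below:

     GOMP_ordered_start ();
     BODY;
     GOMP_ordered_end ();

   followed by a GIMPLE_OMP_RETURN with the nowait flag set.  */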
6111
6112 static void
6113 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6114 {
6115 tree block;
6116 gimple stmt = gsi_stmt (*gsi_p), bind, x;
6117 struct gimplify_ctx gctx;
6118
6119 push_gimplify_context (&gctx);
6120
6121 block = make_node (BLOCK);
6122 bind = gimple_build_bind (NULL, NULL, block);
6123 gsi_replace (gsi_p, bind, true);
6124 gimple_bind_add_stmt (bind, stmt);
6125
6126 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
6127 0);
6128 gimple_bind_add_stmt (bind, x);
6129
6130 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6131 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6132 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6133 gimple_omp_set_body (stmt, NULL);
6134
6135 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
6136 gimple_bind_add_stmt (bind, x);
6137
6138 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6139
6140 pop_gimplify_context (bind);
6141
6142 gimple_bind_append_vars (bind, ctx->block_vars);
6143 BLOCK_VARS (block) = gimple_bind_vars (bind);
6144 }
6145
6146
6147 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
6148    substitution of a couple of function calls.  But the NAMED case
6149    requires that languages coordinate a symbol name.  It is therefore
6150 best put here in common code. */
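
/* Illustrative sketch (added for exposition): "#pragma omp critical (foo)"
   around BODY is lowered below to roughly

     GOMP_critical_name_start (&.gomp_critical_user_foo);
     BODY;
     GOMP_critical_name_end (&.gomp_critical_user_foo);

   where .gomp_critical_user_foo is a common pointer-sized symbol created
   on first use; the unnamed form uses GOMP_critical_start/_end instead.  */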
6151
6152 static GTY((param1_is (tree), param2_is (tree)))
6153 splay_tree critical_name_mutexes;
6154
6155 static void
6156 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6157 {
6158 tree block;
6159 tree name, lock, unlock;
6160 gimple stmt = gsi_stmt (*gsi_p), bind;
6161 location_t loc = gimple_location (stmt);
6162 gimple_seq tbody;
6163 struct gimplify_ctx gctx;
6164
6165 name = gimple_omp_critical_name (stmt);
6166 if (name)
6167 {
6168 tree decl;
6169 splay_tree_node n;
6170
6171 if (!critical_name_mutexes)
6172 critical_name_mutexes
6173 = splay_tree_new_ggc (splay_tree_compare_pointers,
6174 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
6175 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
6176
6177 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
6178 if (n == NULL)
6179 {
6180 char *new_str;
6181
6182 decl = create_tmp_var_raw (ptr_type_node, NULL);
6183
6184 new_str = ACONCAT ((".gomp_critical_user_",
6185 IDENTIFIER_POINTER (name), NULL));
6186 DECL_NAME (decl) = get_identifier (new_str);
6187 TREE_PUBLIC (decl) = 1;
6188 TREE_STATIC (decl) = 1;
6189 DECL_COMMON (decl) = 1;
6190 DECL_ARTIFICIAL (decl) = 1;
6191 DECL_IGNORED_P (decl) = 1;
6192 varpool_finalize_decl (decl);
6193
6194 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
6195 (splay_tree_value) decl);
6196 }
6197 else
6198 decl = (tree) n->value;
6199
6200 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
6201 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
6202
6203 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
6204 unlock = build_call_expr_loc (loc, unlock, 1,
6205 build_fold_addr_expr_loc (loc, decl));
6206 }
6207 else
6208 {
6209 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
6210 lock = build_call_expr_loc (loc, lock, 0);
6211
6212 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
6213 unlock = build_call_expr_loc (loc, unlock, 0);
6214 }
6215
6216 push_gimplify_context (&gctx);
6217
6218 block = make_node (BLOCK);
6219 bind = gimple_build_bind (NULL, NULL, block);
6220 gsi_replace (gsi_p, bind, true);
6221 gimple_bind_add_stmt (bind, stmt);
6222
6223 tbody = gimple_bind_body (bind);
6224 gimplify_and_add (lock, &tbody);
6225 gimple_bind_set_body (bind, tbody);
6226
6227 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6228 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6229 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6230 gimple_omp_set_body (stmt, NULL);
6231
6232 tbody = gimple_bind_body (bind);
6233 gimplify_and_add (unlock, &tbody);
6234 gimple_bind_set_body (bind, tbody);
6235
6236 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6237
6238 pop_gimplify_context (bind);
6239 gimple_bind_append_vars (bind, ctx->block_vars);
6240 BLOCK_VARS (block) = gimple_bind_vars (bind);
6241 }
6242
6243
6244 /* A subroutine of lower_omp_for. Generate code to emit the predicate
6245 for a lastprivate clause. Given a loop control predicate of (V
6246 cond N2), we gate the clause on (!(V cond N2)). The lowered form
6247 is appended to *DLIST, iterator initialization is appended to
6248 *BODY_P. */
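
/* Illustrative example (not from the original sources): for

     #pragma omp for lastprivate (x)
     for (i = 0; i < n; i++) ...

   the copy-back of x appended to *DLIST is guarded by "if (i >= n)"
   (or "if (i == n)" when the step is +-1), so only the thread that ran
   the final iteration performs it; the initialization added to *BODY_P
   keeps threads with zero iterations from passing that guard.  */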
6249
6250 static void
6251 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6252 gimple_seq *dlist, struct omp_context *ctx)
6253 {
6254 tree clauses, cond, vinit;
6255 enum tree_code cond_code;
6256 gimple_seq stmts;
6257
6258 cond_code = fd->loop.cond_code;
6259 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6260
6261   /* When possible, use a strict equality expression.  This can let
6262      VRP-style optimizations deduce the value and remove a copy.  */
6263 if (host_integerp (fd->loop.step, 0))
6264 {
6265 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6266 if (step == 1 || step == -1)
6267 cond_code = EQ_EXPR;
6268 }
6269
6270 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6271
6272 clauses = gimple_omp_for_clauses (fd->for_stmt);
6273 stmts = NULL;
6274 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6275 if (!gimple_seq_empty_p (stmts))
6276 {
6277 gimple_seq_add_seq (&stmts, *dlist);
6278 *dlist = stmts;
6279
6280 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6281 vinit = fd->loop.n1;
6282 if (cond_code == EQ_EXPR
6283 && host_integerp (fd->loop.n2, 0)
6284 && ! integer_zerop (fd->loop.n2))
6285 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6286
6287 /* Initialize the iterator variable, so that threads that don't execute
6288 any iterations don't execute the lastprivate clauses by accident. */
6289 gimplify_assign (fd->loop.v, vinit, body_p);
6290 }
6291 }
6292
6293
6294 /* Lower code for an OpenMP loop directive. */
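
/* Roughly (an illustrative outline of the sequence assembled below), the
   result is a GIMPLE_BIND containing

     input clauses (private/firstprivate setup) and the pre-body;
     the GIMPLE_OMP_FOR statement itself;
     the loop body;
     GIMPLE_OMP_CONTINUE (v, v);
     reduction code and the lastprivate/destructor list;
     GIMPLE_OMP_RETURN (nowait flag taken from the clauses).  */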
6295
6296 static void
6297 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6298 {
6299 tree *rhs_p, block;
6300 struct omp_for_data fd;
6301 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6302 gimple_seq omp_for_body, body, dlist;
6303 size_t i;
6304 struct gimplify_ctx gctx;
6305
6306 push_gimplify_context (&gctx);
6307
6308 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
6309 lower_omp (gimple_omp_body_ptr (stmt), ctx);
6310
6311 block = make_node (BLOCK);
6312 new_stmt = gimple_build_bind (NULL, NULL, block);
6313   /* Replace at gsi right away, so that 'stmt' is no longer a member
6314      of a sequence, as we're going to add it to a different
6315      one below.  */
6316 gsi_replace (gsi_p, new_stmt, true);
6317
6318   /* Move declarations of temporaries in the loop body before we make
6319      it go away.  */
6320 omp_for_body = gimple_omp_body (stmt);
6321 if (!gimple_seq_empty_p (omp_for_body)
6322 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6323 {
6324 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6325 gimple_bind_append_vars (new_stmt, vars);
6326 }
6327
6328 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6329 dlist = NULL;
6330 body = NULL;
6331 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6332 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6333
6334 /* Lower the header expressions. At this point, we can assume that
6335 the header is of the form:
6336
6337 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6338
6339 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6340 using the .omp_data_s mapping, if needed. */
6341 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6342 {
6343 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6344 if (!is_gimple_min_invariant (*rhs_p))
6345 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6346
6347 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6348 if (!is_gimple_min_invariant (*rhs_p))
6349 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6350
6351 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6352 if (!is_gimple_min_invariant (*rhs_p))
6353 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6354 }
6355
6356 /* Once lowered, extract the bounds and clauses. */
6357 extract_omp_for_data (stmt, &fd, NULL);
6358
6359 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6360
6361 gimple_seq_add_stmt (&body, stmt);
6362 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6363
6364 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6365 fd.loop.v));
6366
6367 /* After the loop, add exit clauses. */
6368 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6369 gimple_seq_add_seq (&body, dlist);
6370
6371 body = maybe_catch_exception (body);
6372
6373 /* Region exit marker goes at the end of the loop body. */
6374 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6375
6376 pop_gimplify_context (new_stmt);
6377
6378 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6379 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6380 if (BLOCK_VARS (block))
6381 TREE_USED (block) = 1;
6382
6383 gimple_bind_set_body (new_stmt, body);
6384 gimple_omp_set_body (stmt, NULL);
6385 gimple_omp_for_set_pre_body (stmt, NULL);
6386 }
6387
6388 /* Callback for walk_stmts. Check if the current statement only contains
6389    GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */
6390
6391 static tree
6392 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6393 bool *handled_ops_p,
6394 struct walk_stmt_info *wi)
6395 {
6396 int *info = (int *) wi->info;
6397 gimple stmt = gsi_stmt (*gsi_p);
6398
6399 *handled_ops_p = true;
6400 switch (gimple_code (stmt))
6401 {
6402 WALK_SUBSTMTS;
6403
6404 case GIMPLE_OMP_FOR:
6405 case GIMPLE_OMP_SECTIONS:
6406 *info = *info == 0 ? 1 : -1;
6407 break;
6408 default:
6409 *info = -1;
6410 break;
6411 }
6412 return NULL;
6413 }
6414
6415 struct omp_taskcopy_context
6416 {
6417 /* This field must be at the beginning, as we do "inheritance": Some
6418 callback functions for tree-inline.c (e.g., omp_copy_decl)
6419 receive a copy_body_data pointer that is up-casted to an
6420 omp_context pointer. */
6421 copy_body_data cb;
6422 omp_context *ctx;
6423 };
6424
6425 static tree
6426 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6427 {
6428 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6429
6430 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6431 return create_tmp_var (TREE_TYPE (var), NULL);
6432
6433 return var;
6434 }
6435
6436 static tree
6437 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6438 {
6439 tree name, new_fields = NULL, type, f;
6440
6441 type = lang_hooks.types.make_type (RECORD_TYPE);
6442 name = DECL_NAME (TYPE_NAME (orig_type));
6443 name = build_decl (gimple_location (tcctx->ctx->stmt),
6444 TYPE_DECL, name, type);
6445 TYPE_NAME (type) = name;
6446
6447 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6448 {
6449 tree new_f = copy_node (f);
6450 DECL_CONTEXT (new_f) = type;
6451 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6452 TREE_CHAIN (new_f) = new_fields;
6453 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6454 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6455 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6456 &tcctx->cb, NULL);
6457 new_fields = new_f;
6458 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6459 }
6460 TYPE_FIELDS (type) = nreverse (new_fields);
6461 layout_type (type);
6462 return type;
6463 }
6464
6465 /* Create task copyfn. */
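
/* Illustrative sketch (added for exposition; struct and field names are
   hypothetical): the generated copyfn takes a pointer to the task's data
   block and a pointer to the sender's block and copies shared-variable
   pointers and firstprivate values across, conceptually

     void copyfn (struct data *dst, struct sdata *src)
     {
       dst->shared_p = src->shared_p;
       dst->firstpriv = src->firstpriv;   // or a copy-constructor call
     }

   with an extra pass for variably sized (VLA) firstprivates.  */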
6466
6467 static void
6468 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6469 {
6470 struct function *child_cfun;
6471 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6472 tree record_type, srecord_type, bind, list;
6473 bool record_needs_remap = false, srecord_needs_remap = false;
6474 splay_tree_node n;
6475 struct omp_taskcopy_context tcctx;
6476 struct gimplify_ctx gctx;
6477 location_t loc = gimple_location (task_stmt);
6478
6479 child_fn = gimple_omp_task_copy_fn (task_stmt);
6480 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6481 gcc_assert (child_cfun->cfg == NULL);
6482 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6483
6484 /* Reset DECL_CONTEXT on function arguments. */
6485 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
6486 DECL_CONTEXT (t) = child_fn;
6487
6488 /* Populate the function. */
6489 push_gimplify_context (&gctx);
6490 push_cfun (child_cfun);
6491
6492 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6493 TREE_SIDE_EFFECTS (bind) = 1;
6494 list = NULL;
6495 DECL_SAVED_TREE (child_fn) = bind;
6496 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6497
6498 /* Remap src and dst argument types if needed. */
6499 record_type = ctx->record_type;
6500 srecord_type = ctx->srecord_type;
6501 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6502 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6503 {
6504 record_needs_remap = true;
6505 break;
6506 }
6507 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
6508 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6509 {
6510 srecord_needs_remap = true;
6511 break;
6512 }
6513
6514 if (record_needs_remap || srecord_needs_remap)
6515 {
6516 memset (&tcctx, '\0', sizeof (tcctx));
6517 tcctx.cb.src_fn = ctx->cb.src_fn;
6518 tcctx.cb.dst_fn = child_fn;
6519 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
6520 gcc_checking_assert (tcctx.cb.src_node);
6521 tcctx.cb.dst_node = tcctx.cb.src_node;
6522 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6523 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6524 tcctx.cb.eh_lp_nr = 0;
6525 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6526 tcctx.cb.decl_map = pointer_map_create ();
6527 tcctx.ctx = ctx;
6528
6529 if (record_needs_remap)
6530 record_type = task_copyfn_remap_type (&tcctx, record_type);
6531 if (srecord_needs_remap)
6532 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6533 }
6534 else
6535 tcctx.cb.decl_map = NULL;
6536
6537 arg = DECL_ARGUMENTS (child_fn);
6538 TREE_TYPE (arg) = build_pointer_type (record_type);
6539 sarg = DECL_CHAIN (arg);
6540 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6541
6542 /* First pass: initialize temporaries used in record_type and srecord_type
6543 sizes and field offsets. */
6544 if (tcctx.cb.decl_map)
6545 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6546 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6547 {
6548 tree *p;
6549
6550 decl = OMP_CLAUSE_DECL (c);
6551 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6552 if (p == NULL)
6553 continue;
6554 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6555 sf = (tree) n->value;
6556 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6557 src = build_simple_mem_ref_loc (loc, sarg);
6558 src = omp_build_component_ref (src, sf);
6559 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6560 append_to_statement_list (t, &list);
6561 }
6562
6563 /* Second pass: copy shared var pointers and copy construct non-VLA
6564 firstprivate vars. */
6565 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6566 switch (OMP_CLAUSE_CODE (c))
6567 {
6568 case OMP_CLAUSE_SHARED:
6569 decl = OMP_CLAUSE_DECL (c);
6570 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6571 if (n == NULL)
6572 break;
6573 f = (tree) n->value;
6574 if (tcctx.cb.decl_map)
6575 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6576 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6577 sf = (tree) n->value;
6578 if (tcctx.cb.decl_map)
6579 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6580 src = build_simple_mem_ref_loc (loc, sarg);
6581 src = omp_build_component_ref (src, sf);
6582 dst = build_simple_mem_ref_loc (loc, arg);
6583 dst = omp_build_component_ref (dst, f);
6584 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6585 append_to_statement_list (t, &list);
6586 break;
6587 case OMP_CLAUSE_FIRSTPRIVATE:
6588 decl = OMP_CLAUSE_DECL (c);
6589 if (is_variable_sized (decl))
6590 break;
6591 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6592 if (n == NULL)
6593 break;
6594 f = (tree) n->value;
6595 if (tcctx.cb.decl_map)
6596 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6597 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6598 if (n != NULL)
6599 {
6600 sf = (tree) n->value;
6601 if (tcctx.cb.decl_map)
6602 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6603 src = build_simple_mem_ref_loc (loc, sarg);
6604 src = omp_build_component_ref (src, sf);
6605 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6606 src = build_simple_mem_ref_loc (loc, src);
6607 }
6608 else
6609 src = decl;
6610 dst = build_simple_mem_ref_loc (loc, arg);
6611 dst = omp_build_component_ref (dst, f);
6612 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6613 append_to_statement_list (t, &list);
6614 break;
6615 case OMP_CLAUSE_PRIVATE:
6616 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6617 break;
6618 decl = OMP_CLAUSE_DECL (c);
6619 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6620 f = (tree) n->value;
6621 if (tcctx.cb.decl_map)
6622 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6623 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6624 if (n != NULL)
6625 {
6626 sf = (tree) n->value;
6627 if (tcctx.cb.decl_map)
6628 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6629 src = build_simple_mem_ref_loc (loc, sarg);
6630 src = omp_build_component_ref (src, sf);
6631 if (use_pointer_for_field (decl, NULL))
6632 src = build_simple_mem_ref_loc (loc, src);
6633 }
6634 else
6635 src = decl;
6636 dst = build_simple_mem_ref_loc (loc, arg);
6637 dst = omp_build_component_ref (dst, f);
6638 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6639 append_to_statement_list (t, &list);
6640 break;
6641 default:
6642 break;
6643 }
6644
6645 /* Last pass: handle VLA firstprivates. */
6646 if (tcctx.cb.decl_map)
6647 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6648 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6649 {
6650 tree ind, ptr, df;
6651
6652 decl = OMP_CLAUSE_DECL (c);
6653 if (!is_variable_sized (decl))
6654 continue;
6655 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6656 if (n == NULL)
6657 continue;
6658 f = (tree) n->value;
6659 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6660 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6661 ind = DECL_VALUE_EXPR (decl);
6662 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6663 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6664 n = splay_tree_lookup (ctx->sfield_map,
6665 (splay_tree_key) TREE_OPERAND (ind, 0));
6666 sf = (tree) n->value;
6667 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6668 src = build_simple_mem_ref_loc (loc, sarg);
6669 src = omp_build_component_ref (src, sf);
6670 src = build_simple_mem_ref_loc (loc, src);
6671 dst = build_simple_mem_ref_loc (loc, arg);
6672 dst = omp_build_component_ref (dst, f);
6673 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6674 append_to_statement_list (t, &list);
6675 n = splay_tree_lookup (ctx->field_map,
6676 (splay_tree_key) TREE_OPERAND (ind, 0));
6677 df = (tree) n->value;
6678 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6679 ptr = build_simple_mem_ref_loc (loc, arg);
6680 ptr = omp_build_component_ref (ptr, df);
6681 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6682 build_fold_addr_expr_loc (loc, dst));
6683 append_to_statement_list (t, &list);
6684 }
6685
6686 t = build1 (RETURN_EXPR, void_type_node, NULL);
6687 append_to_statement_list (t, &list);
6688
6689 if (tcctx.cb.decl_map)
6690 pointer_map_destroy (tcctx.cb.decl_map);
6691 pop_gimplify_context (NULL);
6692 BIND_EXPR_BODY (bind) = list;
6693 pop_cfun ();
6694 }
6695
6696 /* Lower the OpenMP parallel or task directive in the current statement
6697 in GSI_P. CTX holds context information for the directive. */
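
/* Illustrative outline (for exposition only) of what the lowering below
   produces: a new GIMPLE_BIND containing

     send-clause setup filling in the .omp_data_o record;
     the GIMPLE_OMP_PARALLEL/TASK statement, whose body becomes
       receiver_decl = &.omp_data_o;
       input clauses; BODY; reductions; GIMPLE_OMP_RETURN;
     copy-back and cleanup code from the send clauses.  */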
6698
6699 static void
6700 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6701 {
6702 tree clauses;
6703 tree child_fn, t;
6704 gimple stmt = gsi_stmt (*gsi_p);
6705 gimple par_bind, bind;
6706 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6707 struct gimplify_ctx gctx;
6708 location_t loc = gimple_location (stmt);
6709
6710 clauses = gimple_omp_taskreg_clauses (stmt);
6711 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6712 par_body = gimple_bind_body (par_bind);
6713 child_fn = ctx->cb.dst_fn;
6714 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6715 && !gimple_omp_parallel_combined_p (stmt))
6716 {
6717 struct walk_stmt_info wi;
6718 int ws_num = 0;
6719
6720 memset (&wi, 0, sizeof (wi));
6721 wi.info = &ws_num;
6722 wi.val_only = true;
6723 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6724 if (ws_num == 1)
6725 gimple_omp_parallel_set_combined_p (stmt, true);
6726 }
6727 if (ctx->srecord_type)
6728 create_task_copyfn (stmt, ctx);
6729
6730 push_gimplify_context (&gctx);
6731
6732 par_olist = NULL;
6733 par_ilist = NULL;
6734 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6735 lower_omp (&par_body, ctx);
6736 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6737 lower_reduction_clauses (clauses, &par_olist, ctx);
6738
6739 /* Declare all the variables created by mapping and the variables
6740 declared in the scope of the parallel body. */
6741 record_vars_into (ctx->block_vars, child_fn);
6742 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6743
6744 if (ctx->record_type)
6745 {
6746 ctx->sender_decl
6747 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6748 : ctx->record_type, ".omp_data_o");
6749 DECL_NAMELESS (ctx->sender_decl) = 1;
6750 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6751 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6752 }
6753
6754 olist = NULL;
6755 ilist = NULL;
6756 lower_send_clauses (clauses, &ilist, &olist, ctx);
6757 lower_send_shared_vars (&ilist, &olist, ctx);
6758
6759 /* Once all the expansions are done, sequence all the different
6760 fragments inside gimple_omp_body. */
6761
6762 new_body = NULL;
6763
6764 if (ctx->record_type)
6765 {
6766 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6767 /* fixup_child_record_type might have changed receiver_decl's type. */
6768 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
6769 gimple_seq_add_stmt (&new_body,
6770 gimple_build_assign (ctx->receiver_decl, t));
6771 }
6772
6773 gimple_seq_add_seq (&new_body, par_ilist);
6774 gimple_seq_add_seq (&new_body, par_body);
6775 gimple_seq_add_seq (&new_body, par_olist);
6776 new_body = maybe_catch_exception (new_body);
6777 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6778 gimple_omp_set_body (stmt, new_body);
6779
6780 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6781 gsi_replace (gsi_p, bind, true);
6782 gimple_bind_add_seq (bind, ilist);
6783 gimple_bind_add_stmt (bind, stmt);
6784 gimple_bind_add_seq (bind, olist);
6785
6786 pop_gimplify_context (NULL);
6787 }
6788
6789 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6790    regimplified.  If DATA is non-NULL, lower_omp_1 is being called outside
6791    of an OpenMP context, but with task_shared_vars set.  */
6792
6793 static tree
6794 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6795 void *data)
6796 {
6797 tree t = *tp;
6798
6799 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6800 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6801 return t;
6802
6803 if (task_shared_vars
6804 && DECL_P (t)
6805 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6806 return t;
6807
6808 /* If a global variable has been privatized, TREE_CONSTANT on
6809 ADDR_EXPR might be wrong. */
6810 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6811 recompute_tree_invariant_for_addr_expr (t);
6812
6813 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6814 return NULL_TREE;
6815 }
6816
6817 static void
6818 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6819 {
6820 gimple stmt = gsi_stmt (*gsi_p);
6821 struct walk_stmt_info wi;
6822
6823 if (gimple_has_location (stmt))
6824 input_location = gimple_location (stmt);
6825
6826 if (task_shared_vars)
6827 memset (&wi, '\0', sizeof (wi));
6828
6829 /* If we have issued syntax errors, avoid doing any heavy lifting.
6830 Just replace the OpenMP directives with a NOP to avoid
6831 confusing RTL expansion. */
6832 if (seen_error () && is_gimple_omp (stmt))
6833 {
6834 gsi_replace (gsi_p, gimple_build_nop (), true);
6835 return;
6836 }
6837
6838 switch (gimple_code (stmt))
6839 {
6840 case GIMPLE_COND:
6841 if ((ctx || task_shared_vars)
6842 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6843 ctx ? NULL : &wi, NULL)
6844 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6845 ctx ? NULL : &wi, NULL)))
6846 gimple_regimplify_operands (stmt, gsi_p);
6847 break;
6848 case GIMPLE_CATCH:
6849 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
6850 break;
6851 case GIMPLE_EH_FILTER:
6852 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
6853 break;
6854 case GIMPLE_TRY:
6855 lower_omp (gimple_try_eval_ptr (stmt), ctx);
6856 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
6857 break;
6858 case GIMPLE_TRANSACTION:
6859 lower_omp (gimple_transaction_body_ptr (stmt), ctx);
6860 break;
6861 case GIMPLE_BIND:
6862 lower_omp (gimple_bind_body_ptr (stmt), ctx);
6863 break;
6864 case GIMPLE_OMP_PARALLEL:
6865 case GIMPLE_OMP_TASK:
6866 ctx = maybe_lookup_ctx (stmt);
6867 lower_omp_taskreg (gsi_p, ctx);
6868 break;
6869 case GIMPLE_OMP_FOR:
6870 ctx = maybe_lookup_ctx (stmt);
6871 gcc_assert (ctx);
6872 lower_omp_for (gsi_p, ctx);
6873 break;
6874 case GIMPLE_OMP_SECTIONS:
6875 ctx = maybe_lookup_ctx (stmt);
6876 gcc_assert (ctx);
6877 lower_omp_sections (gsi_p, ctx);
6878 break;
6879 case GIMPLE_OMP_SINGLE:
6880 ctx = maybe_lookup_ctx (stmt);
6881 gcc_assert (ctx);
6882 lower_omp_single (gsi_p, ctx);
6883 break;
6884 case GIMPLE_OMP_MASTER:
6885 ctx = maybe_lookup_ctx (stmt);
6886 gcc_assert (ctx);
6887 lower_omp_master (gsi_p, ctx);
6888 break;
6889 case GIMPLE_OMP_ORDERED:
6890 ctx = maybe_lookup_ctx (stmt);
6891 gcc_assert (ctx);
6892 lower_omp_ordered (gsi_p, ctx);
6893 break;
6894 case GIMPLE_OMP_CRITICAL:
6895 ctx = maybe_lookup_ctx (stmt);
6896 gcc_assert (ctx);
6897 lower_omp_critical (gsi_p, ctx);
6898 break;
6899 case GIMPLE_OMP_ATOMIC_LOAD:
6900 if ((ctx || task_shared_vars)
6901 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6902 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6903 gimple_regimplify_operands (stmt, gsi_p);
6904 break;
6905 default:
6906 if ((ctx || task_shared_vars)
6907 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6908 ctx ? NULL : &wi))
6909 gimple_regimplify_operands (stmt, gsi_p);
6910 break;
6911 }
6912 }
6913
6914 static void
6915 lower_omp (gimple_seq *body, omp_context *ctx)
6916 {
6917 location_t saved_location = input_location;
6918 gimple_stmt_iterator gsi;
6919 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
6920 lower_omp_1 (&gsi, ctx);
6921 input_location = saved_location;
6922 }
6923 \f
6924 /* Main entry point. */
6925
6926 static unsigned int
6927 execute_lower_omp (void)
6928 {
6929 gimple_seq body;
6930
6931 /* This pass always runs, to provide PROP_gimple_lomp.
6932 But there is nothing to do unless -fopenmp is given. */
6933 if (flag_openmp == 0)
6934 return 0;
6935
6936 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6937 delete_omp_context);
6938
6939 body = gimple_body (current_function_decl);
6940 scan_omp (&body, NULL);
6941 gcc_assert (taskreg_nesting_level == 0);
6942
6943 if (all_contexts->root)
6944 {
6945 struct gimplify_ctx gctx;
6946
6947 if (task_shared_vars)
6948 push_gimplify_context (&gctx);
6949 lower_omp (&body, NULL);
6950 if (task_shared_vars)
6951 pop_gimplify_context (NULL);
6952 }
6953
6954 if (all_contexts)
6955 {
6956 splay_tree_delete (all_contexts);
6957 all_contexts = NULL;
6958 }
6959 BITMAP_FREE (task_shared_vars);
6960 return 0;
6961 }
6962
6963 struct gimple_opt_pass pass_lower_omp =
6964 {
6965 {
6966 GIMPLE_PASS,
6967 "omplower", /* name */
6968 OPTGROUP_NONE, /* optinfo_flags */
6969 NULL, /* gate */
6970 execute_lower_omp, /* execute */
6971 NULL, /* sub */
6972 NULL, /* next */
6973 0, /* static_pass_number */
6974 TV_NONE, /* tv_id */
6975 PROP_gimple_any, /* properties_required */
6976 PROP_gimple_lomp, /* properties_provided */
6977 0, /* properties_destroyed */
6978 0, /* todo_flags_start */
6979 0 /* todo_flags_finish */
6980 }
6981 };
6982 \f
6983 /* The following is a utility to diagnose OpenMP structured block violations.
6984 It is not part of the "omplower" pass, as that's invoked too late. It
6985 should be invoked by the respective front ends after gimplification. */
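
/* For example (illustrative only), the two walks below reject code such as

     goto l;
     #pragma omp parallel
       { l: ; }

   reporting an invalid entry into an OpenMP structured block; a jump out
   of such a block is rejected in the same way.  */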
6986
6987 static splay_tree all_labels;
6988
6989 /* Check for mismatched contexts and generate an error if needed. Return
6990 true if an error is detected. */
6991
6992 static bool
6993 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6994 gimple branch_ctx, gimple label_ctx)
6995 {
6996 if (label_ctx == branch_ctx)
6997 return false;
6998
6999
7000 /*
7001 Previously we kept track of the label's entire context in diagnose_sb_[12]
7002 so we could traverse it and issue a correct "exit" or "enter" error
7003 message upon a structured block violation.
7004
7005 We built the context by building a list with tree_cons'ing, but there is
7006 no easy counterpart in gimple tuples. It seems like far too much work
7007 for issuing exit/enter error messages. If someone really misses the
7008 distinct error message... patches welcome.
7009 */
7010
7011 #if 0
7012   /* Try to avoid confusing the user by producing an error message
7013 with correct "exit" or "enter" verbiage. We prefer "exit"
7014 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
7015 if (branch_ctx == NULL)
7016 exit_p = false;
7017 else
7018 {
7019 while (label_ctx)
7020 {
7021 if (TREE_VALUE (label_ctx) == branch_ctx)
7022 {
7023 exit_p = false;
7024 break;
7025 }
7026 label_ctx = TREE_CHAIN (label_ctx);
7027 }
7028 }
7029
7030 if (exit_p)
7031 error ("invalid exit from OpenMP structured block");
7032 else
7033 error ("invalid entry to OpenMP structured block");
7034 #endif
7035
7036 /* If it's obvious we have an invalid entry, be specific about the error. */
7037 if (branch_ctx == NULL)
7038 error ("invalid entry to OpenMP structured block");
7039 else
7040 /* Otherwise, be vague and lazy, but efficient. */
7041 error ("invalid branch to/from an OpenMP structured block");
7042
7043 gsi_replace (gsi_p, gimple_build_nop (), false);
7044 return true;
7045 }
7046
7047 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
7048 where each label is found. */
7049
7050 static tree
7051 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7052 struct walk_stmt_info *wi)
7053 {
7054 gimple context = (gimple) wi->info;
7055 gimple inner_context;
7056 gimple stmt = gsi_stmt (*gsi_p);
7057
7058 *handled_ops_p = true;
7059
7060 switch (gimple_code (stmt))
7061 {
7062 WALK_SUBSTMTS;
7063
7064 case GIMPLE_OMP_PARALLEL:
7065 case GIMPLE_OMP_TASK:
7066 case GIMPLE_OMP_SECTIONS:
7067 case GIMPLE_OMP_SINGLE:
7068 case GIMPLE_OMP_SECTION:
7069 case GIMPLE_OMP_MASTER:
7070 case GIMPLE_OMP_ORDERED:
7071 case GIMPLE_OMP_CRITICAL:
7072 /* The minimal context here is just the current OMP construct. */
7073 inner_context = stmt;
7074 wi->info = inner_context;
7075 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7076 wi->info = context;
7077 break;
7078
7079 case GIMPLE_OMP_FOR:
7080 inner_context = stmt;
7081 wi->info = inner_context;
7082 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7083 walk them. */
7084 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
7085 diagnose_sb_1, NULL, wi);
7086 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7087 wi->info = context;
7088 break;
7089
7090 case GIMPLE_LABEL:
7091 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
7092 (splay_tree_value) context);
7093 break;
7094
7095 default:
7096 break;
7097 }
7098
7099 return NULL_TREE;
7100 }
7101
7102 /* Pass 2: Check each branch and see if its context differs from that of
7103 the destination label's context. */
7104
7105 static tree
7106 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7107 struct walk_stmt_info *wi)
7108 {
7109 gimple context = (gimple) wi->info;
7110 splay_tree_node n;
7111 gimple stmt = gsi_stmt (*gsi_p);
7112
7113 *handled_ops_p = true;
7114
7115 switch (gimple_code (stmt))
7116 {
7117 WALK_SUBSTMTS;
7118
7119 case GIMPLE_OMP_PARALLEL:
7120 case GIMPLE_OMP_TASK:
7121 case GIMPLE_OMP_SECTIONS:
7122 case GIMPLE_OMP_SINGLE:
7123 case GIMPLE_OMP_SECTION:
7124 case GIMPLE_OMP_MASTER:
7125 case GIMPLE_OMP_ORDERED:
7126 case GIMPLE_OMP_CRITICAL:
7127 wi->info = stmt;
7128 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
7129 wi->info = context;
7130 break;
7131
7132 case GIMPLE_OMP_FOR:
7133 wi->info = stmt;
7134 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7135 walk them. */
7136 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
7137 diagnose_sb_2, NULL, wi);
7138 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
7139 wi->info = context;
7140 break;
7141
7142 case GIMPLE_COND:
7143 {
7144 tree lab = gimple_cond_true_label (stmt);
7145 if (lab)
7146 {
7147 n = splay_tree_lookup (all_labels,
7148 (splay_tree_key) lab);
7149 diagnose_sb_0 (gsi_p, context,
7150 n ? (gimple) n->value : NULL);
7151 }
7152 lab = gimple_cond_false_label (stmt);
7153 if (lab)
7154 {
7155 n = splay_tree_lookup (all_labels,
7156 (splay_tree_key) lab);
7157 diagnose_sb_0 (gsi_p, context,
7158 n ? (gimple) n->value : NULL);
7159 }
7160 }
7161 break;
7162
7163 case GIMPLE_GOTO:
7164 {
7165 tree lab = gimple_goto_dest (stmt);
7166 if (TREE_CODE (lab) != LABEL_DECL)
7167 break;
7168
7169 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7170 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
7171 }
7172 break;
7173
7174 case GIMPLE_SWITCH:
7175 {
7176 unsigned int i;
7177 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
7178 {
7179 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
7180 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7181 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
7182 break;
7183 }
7184 }
7185 break;
7186
7187 case GIMPLE_RETURN:
7188 diagnose_sb_0 (gsi_p, context, NULL);
7189 break;
7190
7191 default:
7192 break;
7193 }
7194
7195 return NULL_TREE;
7196 }
7197
7198 static unsigned int
7199 diagnose_omp_structured_block_errors (void)
7200 {
7201 struct walk_stmt_info wi;
7202 gimple_seq body = gimple_body (current_function_decl);
7203
7204 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
7205
7206 memset (&wi, 0, sizeof (wi));
7207 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
7208
7209 memset (&wi, 0, sizeof (wi));
7210 wi.want_locations = true;
7211 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
7212
7213 gimple_set_body (current_function_decl, body);
7214
7215 splay_tree_delete (all_labels);
7216 all_labels = NULL;
7217
7218 return 0;
7219 }
7220
7221 static bool
7222 gate_diagnose_omp_blocks (void)
7223 {
7224 return flag_openmp != 0;
7225 }
7226
7227 struct gimple_opt_pass pass_diagnose_omp_blocks =
7228 {
7229 {
7230 GIMPLE_PASS,
7231 "*diagnose_omp_blocks", /* name */
7232 OPTGROUP_NONE, /* optinfo_flags */
7233 gate_diagnose_omp_blocks, /* gate */
7234 diagnose_omp_structured_block_errors, /* execute */
7235 NULL, /* sub */
7236 NULL, /* next */
7237 0, /* static_pass_number */
7238 TV_NONE, /* tv_id */
7239 PROP_gimple_any, /* properties_required */
7240 0, /* properties_provided */
7241 0, /* properties_destroyed */
7242 0, /* todo_flags_start */
7243 0, /* todo_flags_finish */
7244 }
7245 };
7246
7247 #include "gt-omp-low.h"