1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005, 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "gimple.h"
31 #include "tree-iterator.h"
32 #include "tree-inline.h"
33 #include "langhooks.h"
34 #include "diagnostic.h"
35 #include "tree-flow.h"
36 #include "timevar.h"
37 #include "flags.h"
38 #include "function.h"
39 #include "expr.h"
40 #include "toplev.h"
41 #include "tree-pass.h"
42 #include "ggc.h"
43 #include "except.h"
44 #include "splay-tree.h"
45 #include "optabs.h"
46 #include "cfgloop.h"
47
48
49 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
50 phases. The first phase scans the function looking for OMP statements
51 and then for variables that must be replaced to satisfy data sharing
52 clauses. The second phase expands code for the constructs, as well as
53 re-gimplifying things when variables have been replaced with complex
54 expressions.
55
56 Final code generation is done by pass_expand_omp. The flowgraph is
57 scanned for parallel regions which are then moved to a new
58 function, to be invoked by the thread library. */
59
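/* As a rough illustration (simplified; clause handling, the layout of
   .omp_data_s and the combined parallel+workshare variants are omitted),
   a region such as

     #pragma omp parallel
       body;

   is outlined into a child function (e.g. foo.omp_fn.0) taking a single
   pointer argument, and the directive is replaced by calls into libgomp
   of approximately this shape:

     GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
     foo.omp_fn.0 (&.omp_data_o);
     GOMP_parallel_end ();  */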
60 /* Context structure. Used to store information about each parallel
61 directive in the code. */
62
63 typedef struct omp_context
64 {
65 /* This field must be at the beginning, as we do "inheritance": Some
66 callback functions for tree-inline.c (e.g., omp_copy_decl)
67 receive a copy_body_data pointer that is up-casted to an
68 omp_context pointer. */
69 copy_body_data cb;
70
71 /* The tree of contexts corresponding to the encountered constructs. */
72 struct omp_context *outer;
73 gimple stmt;
74
75 /* Map variables to fields in a structure that allows communication
76 between sending and receiving threads. */
77 splay_tree field_map;
78 tree record_type;
79 tree sender_decl;
80 tree receiver_decl;
81
82 /* These are used only by task contexts, when the task firstprivate fn
83 is needed. srecord_type is used to communicate from the thread
84 that encountered the task construct to the task firstprivate fn;
85 record_type is allocated by GOMP_task, initialized by the task
86 firstprivate fn and passed to the task body fn. */
87 splay_tree sfield_map;
88 tree srecord_type;
89
90 /* A chain of variables to add to the top-level block surrounding the
91 construct. In the case of a parallel, this is in the child function. */
92 tree block_vars;
93
94 /* What to do with variables with implicitly determined sharing
95 attributes. */
96 enum omp_clause_default_kind default_kind;
97
98 /* Nesting depth of this context. Used to beautify error messages re
99 invalid gotos. The outermost ctx is depth 1, with depth 0 being
100 reserved for the main body of the function. */
101 int depth;
102
103 /* True if this parallel directive is nested within another. */
104 bool is_nested;
105 } omp_context;
106
107
108 struct omp_for_data_loop
109 {
110 tree v, n1, n2, step;
111 enum tree_code cond_code;
112 };
113
114 /* A structure describing the main elements of a parallel loop. */
115
116 struct omp_for_data
117 {
118 struct omp_for_data_loop loop;
119 tree chunk_size;
120 gimple for_stmt;
121 tree pre, iter_type;
122 int collapse;
123 bool have_nowait, have_ordered;
124 enum omp_clause_schedule_kind sched_kind;
125 struct omp_for_data_loop *loops;
126 };
127
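/* As an illustrative sketch (not any particular compiler dump): for

     #pragma omp for schedule (dynamic, 4)
     for (i = 0; i < n; i++)
       body;

   extract_omp_for_data below fills in roughly
     loop.v = i, loop.n1 = 0, loop.n2 = n, loop.step = 1,
     loop.cond_code = LT_EXPR, sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC,
     chunk_size = 4, collapse = 1.  */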
128
129 static splay_tree all_contexts;
130 static int taskreg_nesting_level;
131 struct omp_region *root_omp_region;
132 static bitmap task_shared_vars;
133
134 static void scan_omp (gimple_seq, omp_context *);
135 static tree scan_omp_1_op (tree *, int *, void *);
136
137 #define WALK_SUBSTMTS \
138 case GIMPLE_BIND: \
139 case GIMPLE_TRY: \
140 case GIMPLE_CATCH: \
141 case GIMPLE_EH_FILTER: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
145
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
147
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
150 {
151 struct walk_stmt_info wi;
152
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
156
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
158 }
159
160 static void lower_omp (gimple_seq, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
163
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
165
166 tree
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
168 {
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
171 return clauses;
172
173 return NULL_TREE;
174 }
175
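/* Typical use (illustrative): testing whether a worksharing construct
   carries a nowait clause looks like

     if (find_omp_clause (gimple_omp_for_clauses (stmt), OMP_CLAUSE_NOWAIT))
       ...

   A NULL_TREE result means the clause is absent.  */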
176 /* Return true if CTX is for an omp parallel. */
177
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
180 {
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
182 }
183
184
185 /* Return true if CTX is for an omp task. */
186
187 static inline bool
188 is_task_ctx (omp_context *ctx)
189 {
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
191 }
192
193
194 /* Return true if CTX is for an omp parallel or omp task. */
195
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
198 {
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
201 }
202
203
204 /* Return true if REGION is a combined parallel+workshare region. */
205
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
208 {
209 return region->is_combined_parallel;
210 }
211
212
213 /* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
215
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
219 {
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
225
226 fd->for_stmt = for_stmt;
227 fd->pre = NULL;
228 fd->collapse = gimple_omp_for_collapse (for_stmt);
229 if (fd->collapse > 1)
230 fd->loops = loops;
231 else
232 fd->loops = &fd->loop;
233
234 fd->have_nowait = fd->have_ordered = false;
235 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
236 fd->chunk_size = NULL_TREE;
237 collapse_iter = NULL;
238 collapse_count = NULL;
239
240 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
241 switch (OMP_CLAUSE_CODE (t))
242 {
243 case OMP_CLAUSE_NOWAIT:
244 fd->have_nowait = true;
245 break;
246 case OMP_CLAUSE_ORDERED:
247 fd->have_ordered = true;
248 break;
249 case OMP_CLAUSE_SCHEDULE:
250 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
251 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
252 break;
253 case OMP_CLAUSE_COLLAPSE:
254 if (fd->collapse > 1)
255 {
256 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
257 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
258 }
259 default:
260 break;
261 }
262
263 /* FIXME: for now map schedule(auto) to schedule(static).
264 There should be analysis to determine whether all iterations
265 are approximately the same amount of work (then schedule(static)
266 is best) or if it varies (then schedule(dynamic,N) is better). */
267 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
268 {
269 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
270 gcc_assert (fd->chunk_size == NULL);
271 }
272 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
273 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
274 gcc_assert (fd->chunk_size == NULL);
275 else if (fd->chunk_size == NULL)
276 {
277 /* We only need to compute a default chunk size for ordered
278 static loops and dynamic loops. */
279 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
280 || fd->have_ordered
281 || fd->collapse > 1)
282 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
283 ? integer_zero_node : integer_one_node;
284 }
285
286 for (i = 0; i < fd->collapse; i++)
287 {
288 if (fd->collapse == 1)
289 loop = &fd->loop;
290 else if (loops != NULL)
291 loop = loops + i;
292 else
293 loop = &dummy_loop;
294
295
296 loop->v = gimple_omp_for_index (for_stmt, i);
297 gcc_assert (SSA_VAR_P (loop->v));
298 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
299 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
300 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
301 loop->n1 = gimple_omp_for_initial (for_stmt, i);
302
303 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
304 loop->n2 = gimple_omp_for_final (for_stmt, i);
305 switch (loop->cond_code)
306 {
307 case LT_EXPR:
308 case GT_EXPR:
309 break;
310 case LE_EXPR:
311 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
312 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
313 loop->n2, size_one_node);
314 else
315 loop->n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
316 build_int_cst (TREE_TYPE (loop->n2), 1));
317 loop->cond_code = LT_EXPR;
318 break;
319 case GE_EXPR:
320 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
321 loop->n2 = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
322 loop->n2, size_int (-1));
323 else
324 loop->n2 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
325 build_int_cst (TREE_TYPE (loop->n2), 1));
326 loop->cond_code = GT_EXPR;
327 break;
328 default:
329 gcc_unreachable ();
330 }
331
332 t = gimple_omp_for_incr (for_stmt, i);
333 gcc_assert (TREE_OPERAND (t, 0) == var);
334 switch (TREE_CODE (t))
335 {
336 case PLUS_EXPR:
337 case POINTER_PLUS_EXPR:
338 loop->step = TREE_OPERAND (t, 1);
339 break;
340 case MINUS_EXPR:
341 loop->step = TREE_OPERAND (t, 1);
342 loop->step = fold_build1 (NEGATE_EXPR, TREE_TYPE (loop->step),
343 loop->step);
344 break;
345 default:
346 gcc_unreachable ();
347 }
348
349 if (iter_type != long_long_unsigned_type_node)
350 {
351 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
352 iter_type = long_long_unsigned_type_node;
353 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
354 && TYPE_PRECISION (TREE_TYPE (loop->v))
355 >= TYPE_PRECISION (iter_type))
356 {
357 tree n;
358
359 if (loop->cond_code == LT_EXPR)
360 n = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
361 loop->n2, loop->step);
362 else
363 n = loop->n1;
364 if (TREE_CODE (n) != INTEGER_CST
365 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
366 iter_type = long_long_unsigned_type_node;
367 }
368 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
369 > TYPE_PRECISION (iter_type))
370 {
371 tree n1, n2;
372
373 if (loop->cond_code == LT_EXPR)
374 {
375 n1 = loop->n1;
376 n2 = fold_build2 (PLUS_EXPR, TREE_TYPE (loop->v),
377 loop->n2, loop->step);
378 }
379 else
380 {
381 n1 = fold_build2 (MINUS_EXPR, TREE_TYPE (loop->v),
382 loop->n2, loop->step);
383 n2 = loop->n1;
384 }
385 if (TREE_CODE (n1) != INTEGER_CST
386 || TREE_CODE (n2) != INTEGER_CST
387 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
388 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
389 iter_type = long_long_unsigned_type_node;
390 }
391 }
392
393 if (collapse_count && *collapse_count == NULL)
394 {
395 if ((i == 0 || count != NULL_TREE)
396 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
397 && TREE_CONSTANT (loop->n1)
398 && TREE_CONSTANT (loop->n2)
399 && TREE_CODE (loop->step) == INTEGER_CST)
400 {
401 tree itype = TREE_TYPE (loop->v);
402
403 if (POINTER_TYPE_P (itype))
404 itype
405 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
406 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
407 t = fold_build2 (PLUS_EXPR, itype,
408 fold_convert (itype, loop->step), t);
409 t = fold_build2 (PLUS_EXPR, itype, t,
410 fold_convert (itype, loop->n2));
411 t = fold_build2 (MINUS_EXPR, itype, t,
412 fold_convert (itype, loop->n1));
413 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
414 t = fold_build2 (TRUNC_DIV_EXPR, itype,
415 fold_build1 (NEGATE_EXPR, itype, t),
416 fold_build1 (NEGATE_EXPR, itype,
417 fold_convert (itype,
418 loop->step)));
419 else
420 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
421 fold_convert (itype, loop->step));
422 t = fold_convert (long_long_unsigned_type_node, t);
423 if (count != NULL_TREE)
424 count = fold_build2 (MULT_EXPR, long_long_unsigned_type_node,
425 count, t);
426 else
427 count = t;
428 if (TREE_CODE (count) != INTEGER_CST)
429 count = NULL_TREE;
430 }
431 else
432 count = NULL_TREE;
433 }
434 }
435
436 if (count)
437 {
438 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
439 iter_type = long_long_unsigned_type_node;
440 else
441 iter_type = long_integer_type_node;
442 }
443 else if (collapse_iter && *collapse_iter != NULL)
444 iter_type = TREE_TYPE (*collapse_iter);
445 fd->iter_type = iter_type;
446 if (collapse_iter && *collapse_iter == NULL)
447 *collapse_iter = create_tmp_var (iter_type, ".iter");
448 if (collapse_count && *collapse_count == NULL)
449 {
450 if (count)
451 *collapse_count = fold_convert (iter_type, count);
452 else
453 *collapse_count = create_tmp_var (iter_type, ".count");
454 }
455
456 if (fd->collapse > 1)
457 {
458 fd->loop.v = *collapse_iter;
459 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
460 fd->loop.n2 = *collapse_count;
461 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
462 fd->loop.cond_code = LT_EXPR;
463 }
464 }
465
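/* A small worked example of the normalization above (illustrative):
   for "for (i = 0; i <= n - 1; i++)" the LE_EXPR bound is rewritten as
   i < (n - 1) + 1, i.e. n2 becomes n and cond_code becomes LT_EXPR;
   a MINUS_EXPR increment such as "i = i - 2" yields step == -2 via the
   NEGATE_EXPR above.  */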
466
467 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
468 is the immediate dominator of PAR_ENTRY_BB, return true if there
469 are no data dependencies that would prevent expanding the parallel
470 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
471
472 When expanding a combined parallel+workshare region, the call to
473 the child function may need additional arguments in the case of
474 GIMPLE_OMP_FOR regions. In some cases, these arguments are
475 computed out of variables passed in from the parent to the child
476 via 'struct .omp_data_s'. For instance:
477
478 #pragma omp parallel for schedule (guided, i * 4)
479 for (j ...)
480
481 Is lowered into:
482
483 # BLOCK 2 (PAR_ENTRY_BB)
484 .omp_data_o.i = i;
485 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
486
487 # BLOCK 3 (WS_ENTRY_BB)
488 .omp_data_i = &.omp_data_o;
489 D.1667 = .omp_data_i->i;
490 D.1598 = D.1667 * 4;
491 #pragma omp for schedule (guided, D.1598)
492
493 When we outline the parallel region, the call to the child function
494 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
495 that value is computed *after* the call site. So, in principle we
496 cannot do the transformation.
497
498 To see whether the code in WS_ENTRY_BB blocks the combined
499 parallel+workshare call, we collect all the variables used in the
500 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
501 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
502 call.
503
504 FIXME. If we had the SSA form built at this point, we could merely
505 hoist the code in block 3 into block 2 and be done with it. But at
506 this point we don't have dataflow information and though we could
507 hack something up here, it is really not worth the aggravation. */
508
509 static bool
510 workshare_safe_to_combine_p (basic_block par_entry_bb, basic_block ws_entry_bb)
511 {
512 struct omp_for_data fd;
513 gimple par_stmt, ws_stmt;
514
515 par_stmt = last_stmt (par_entry_bb);
516 ws_stmt = last_stmt (ws_entry_bb);
517
518 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
519 return true;
520
521 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
522
523 extract_omp_for_data (ws_stmt, &fd, NULL);
524
525 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
526 return false;
527 if (fd.iter_type != long_integer_type_node)
528 return false;
529
530 /* FIXME. We give up too easily here. If any of these arguments
531 are not constants, they will likely involve variables that have
532 been mapped into fields of .omp_data_s for sharing with the child
533 function. With appropriate data flow, it would be possible to
534 see through this. */
535 if (!is_gimple_min_invariant (fd.loop.n1)
536 || !is_gimple_min_invariant (fd.loop.n2)
537 || !is_gimple_min_invariant (fd.loop.step)
538 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
539 return false;
540
541 return true;
542 }
543
544
545 /* Collect additional arguments needed to emit a combined
546 parallel+workshare call. WS_STMT is the workshare directive being
547 expanded. */
548
549 static tree
550 get_ws_args_for (gimple ws_stmt)
551 {
552 tree t;
553
554 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
555 {
556 struct omp_for_data fd;
557 tree ws_args;
558
559 extract_omp_for_data (ws_stmt, &fd, NULL);
560
561 ws_args = NULL_TREE;
562 if (fd.chunk_size)
563 {
564 t = fold_convert (long_integer_type_node, fd.chunk_size);
565 ws_args = tree_cons (NULL, t, ws_args);
566 }
567
568 t = fold_convert (long_integer_type_node, fd.loop.step);
569 ws_args = tree_cons (NULL, t, ws_args);
570
571 t = fold_convert (long_integer_type_node, fd.loop.n2);
572 ws_args = tree_cons (NULL, t, ws_args);
573
574 t = fold_convert (long_integer_type_node, fd.loop.n1);
575 ws_args = tree_cons (NULL, t, ws_args);
576
577 return ws_args;
578 }
579 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
580 {
581 /* Number of sections is equal to the number of edges from the
582 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
583 the exit of the sections region. */
584 basic_block bb = single_succ (gimple_bb (ws_stmt));
585 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
586 t = tree_cons (NULL, t, NULL);
587 return t;
588 }
589
590 gcc_unreachable ();
591 }
592
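/* Note on the list built above: because tree_cons prepends, the ws_args
   for a GIMPLE_OMP_FOR end up ordered as n1, n2, step[, chunk_size];
   these become the extra trailing arguments of the combined
   parallel+workshare library call.  */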
593
594 /* Discover whether REGION is a combined parallel+workshare region. */
595
596 static void
597 determine_parallel_type (struct omp_region *region)
598 {
599 basic_block par_entry_bb, par_exit_bb;
600 basic_block ws_entry_bb, ws_exit_bb;
601
602 if (region == NULL || region->inner == NULL
603 || region->exit == NULL || region->inner->exit == NULL
604 || region->inner->cont == NULL)
605 return;
606
607 /* We only support parallel+for and parallel+sections. */
608 if (region->type != GIMPLE_OMP_PARALLEL
609 || (region->inner->type != GIMPLE_OMP_FOR
610 && region->inner->type != GIMPLE_OMP_SECTIONS))
611 return;
612
613 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
614 WS_EXIT_BB -> PAR_EXIT_BB. */
615 par_entry_bb = region->entry;
616 par_exit_bb = region->exit;
617 ws_entry_bb = region->inner->entry;
618 ws_exit_bb = region->inner->exit;
619
620 if (single_succ (par_entry_bb) == ws_entry_bb
621 && single_succ (ws_exit_bb) == par_exit_bb
622 && workshare_safe_to_combine_p (par_entry_bb, ws_entry_bb)
623 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
624 || (last_and_only_stmt (ws_entry_bb)
625 && last_and_only_stmt (par_exit_bb))))
626 {
627 gimple ws_stmt = last_stmt (ws_entry_bb);
628
629 if (region->inner->type == GIMPLE_OMP_FOR)
630 {
631 /* If this is a combined parallel loop, we need to determine
632 whether or not to use the combined library calls. There
633 are two cases where we do not apply the transformation:
634 static loops and any kind of ordered loop. In the first
635 case, we already open code the loop so there is no need
636 to do anything else. In the latter case, the combined
637 parallel loop call would still need extra synchronization
638 to implement ordered semantics, so there would not be any
639 gain in using the combined call. */
640 tree clauses = gimple_omp_for_clauses (ws_stmt);
641 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
642 if (c == NULL
643 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
644 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
645 {
646 region->is_combined_parallel = false;
647 region->inner->is_combined_parallel = false;
648 return;
649 }
650 }
651
652 region->is_combined_parallel = true;
653 region->inner->is_combined_parallel = true;
654 region->ws_args = get_ws_args_for (ws_stmt);
655 }
656 }
657
658
659 /* Return true if EXPR is variable sized. */
660
661 static inline bool
662 is_variable_sized (const_tree expr)
663 {
664 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
665 }
666
667 /* Return true if DECL is privatized by reference. */
668
669 static inline bool
670 is_reference (tree decl)
671 {
672 return lang_hooks.decls.omp_privatize_by_reference (decl);
673 }
674
675 /* Lookup variables in the decl or field splay trees. The "maybe" forms
676 allow the variable not to have been entered; the plain forms assert
677 that the variable must have been entered. */
678
679 static inline tree
680 lookup_decl (tree var, omp_context *ctx)
681 {
682 tree *n;
683 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
684 return *n;
685 }
686
687 static inline tree
688 maybe_lookup_decl (const_tree var, omp_context *ctx)
689 {
690 tree *n;
691 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
692 return n ? *n : NULL_TREE;
693 }
694
695 static inline tree
696 lookup_field (tree var, omp_context *ctx)
697 {
698 splay_tree_node n;
699 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
700 return (tree) n->value;
701 }
702
703 static inline tree
704 lookup_sfield (tree var, omp_context *ctx)
705 {
706 splay_tree_node n;
707 n = splay_tree_lookup (ctx->sfield_map
708 ? ctx->sfield_map : ctx->field_map,
709 (splay_tree_key) var);
710 return (tree) n->value;
711 }
712
713 static inline tree
714 maybe_lookup_field (tree var, omp_context *ctx)
715 {
716 splay_tree_node n;
717 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
718 return n ? (tree) n->value : NULL_TREE;
719 }
720
721 /* Return true if DECL should be copied by pointer. SHARED_CTX is
722 the parallel context if DECL is to be shared. */
723
724 static bool
725 use_pointer_for_field (tree decl, omp_context *shared_ctx)
726 {
727 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
728 return true;
729
730 /* We can only use copy-in/copy-out semantics for shared variables
731 when we know the value is not accessible from an outer scope. */
732 if (shared_ctx)
733 {
734 /* ??? Trivially accessible from anywhere. But why would we even
735 be passing an address in this case? Should we simply assert
736 this to be false, or should we have a cleanup pass that removes
737 these from the list of mappings? */
738 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
739 return true;
740
741 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
742 without analyzing the expression whether or not its location
743 is accessible to anyone else. In the case of nested parallel
744 regions it certainly may be. */
745 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
746 return true;
747
748 /* Do not use copy-in/copy-out for variables that have their
749 address taken. */
750 if (TREE_ADDRESSABLE (decl))
751 return true;
752
753 /* Disallow copy-in/out in nested parallel if
754 decl is shared in outer parallel, otherwise
755 each thread could store the shared variable
756 in its own copy-in location, making the
757 variable no longer really shared. */
758 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
759 {
760 omp_context *up;
761
762 for (up = shared_ctx->outer; up; up = up->outer)
763 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
764 break;
765
766 if (up)
767 {
768 tree c;
769
770 for (c = gimple_omp_taskreg_clauses (up->stmt);
771 c; c = OMP_CLAUSE_CHAIN (c))
772 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
773 && OMP_CLAUSE_DECL (c) == decl)
774 break;
775
776 if (c)
777 return true;
778 }
779 }
780
781 /* For tasks avoid using copy-in/out, unless they are readonly
782 (in which case just copy-in is used). As tasks can be
783 deferred or executed in different thread, when GOMP_task
784 returns, the task hasn't necessarily terminated. */
785 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
786 {
787 tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
788 if (is_gimple_reg (outer))
789 {
790 /* Taking address of OUTER in lower_send_shared_vars
791 might need regimplification of everything that uses the
792 variable. */
793 if (!task_shared_vars)
794 task_shared_vars = BITMAP_ALLOC (NULL);
795 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
796 TREE_ADDRESSABLE (outer) = 1;
797 }
798 return true;
799 }
800 }
801
802 return false;
803 }
804
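/* Illustration of the nested-parallel restriction above:

     int x = 0;
     #pragma omp parallel shared (x)
     {
       #pragma omp parallel shared (x)
	 x++;
     }

   The inner region must receive &x rather than a copy; if it used
   copy-in/copy-out, each inner thread could write back its private copy
   independently and x would no longer behave as one shared object.  */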
805 /* Create a new VAR_DECL and copy information from VAR to it. */
806
807 tree
808 copy_var_decl (tree var, tree name, tree type)
809 {
810 tree copy = build_decl (VAR_DECL, name, type);
811
812 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
813 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
814 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
815 DECL_NO_TBAA_P (copy) = DECL_NO_TBAA_P (var);
816 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
817 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
818 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
819 DECL_SOURCE_LOCATION (copy) = DECL_SOURCE_LOCATION (var);
820 TREE_USED (copy) = 1;
821 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
822
823 return copy;
824 }
825
826 /* Construct a new automatic decl similar to VAR. */
827
828 static tree
829 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
830 {
831 tree copy = copy_var_decl (var, name, type);
832
833 DECL_CONTEXT (copy) = current_function_decl;
834 TREE_CHAIN (copy) = ctx->block_vars;
835 ctx->block_vars = copy;
836
837 return copy;
838 }
839
840 static tree
841 omp_copy_decl_1 (tree var, omp_context *ctx)
842 {
843 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
844 }
845
846 /* Build tree nodes to access the field for VAR on the receiver side. */
847
848 static tree
849 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
850 {
851 tree x, field = lookup_field (var, ctx);
852
853 /* If the receiver record type was remapped in the child function,
854 remap the field into the new record type. */
855 x = maybe_lookup_field (field, ctx);
856 if (x != NULL)
857 field = x;
858
859 x = build_fold_indirect_ref (ctx->receiver_decl);
860 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
861 if (by_ref)
862 x = build_fold_indirect_ref (x);
863
864 return x;
865 }
866
867 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
868 of a parallel, this is a component reference; for workshare constructs
869 this is some variable. */
870
871 static tree
872 build_outer_var_ref (tree var, omp_context *ctx)
873 {
874 tree x;
875
876 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
877 x = var;
878 else if (is_variable_sized (var))
879 {
880 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
881 x = build_outer_var_ref (x, ctx);
882 x = build_fold_indirect_ref (x);
883 }
884 else if (is_taskreg_ctx (ctx))
885 {
886 bool by_ref = use_pointer_for_field (var, NULL);
887 x = build_receiver_ref (var, by_ref, ctx);
888 }
889 else if (ctx->outer)
890 x = lookup_decl (var, ctx->outer);
891 else if (is_reference (var))
892 /* This can happen with orphaned constructs. If var is reference, it is
893 possible it is shared and as such valid. */
894 x = var;
895 else
896 gcc_unreachable ();
897
898 if (is_reference (var))
899 x = build_fold_indirect_ref (x);
900
901 return x;
902 }
903
904 /* Build tree nodes to access the field for VAR on the sender side. */
905
906 static tree
907 build_sender_ref (tree var, omp_context *ctx)
908 {
909 tree field = lookup_sfield (var, ctx);
910 return build3 (COMPONENT_REF, TREE_TYPE (field),
911 ctx->sender_decl, field, NULL);
912 }
913
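/* Illustrative shapes of the references built above: on the sender side
   a store such as ".omp_data_o.i = i" uses build_sender_ref, while the
   child function reads the value back as ".omp_data_i->i" via
   build_receiver_ref (compare the example before
   workshare_safe_to_combine_p).  */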
914 /* Add a new field for VAR to the communication record type(s) in CTX; MASK bit 0 selects the receiver record (field_map), bit 1 the sender record (sfield_map). */
915
916 static void
917 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
918 {
919 tree field, type, sfield = NULL_TREE;
920
921 gcc_assert ((mask & 1) == 0
922 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
923 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
924 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
925
926 type = TREE_TYPE (var);
927 if (by_ref)
928 type = build_pointer_type (type);
929 else if ((mask & 3) == 1 && is_reference (var))
930 type = TREE_TYPE (type);
931
932 field = build_decl (FIELD_DECL, DECL_NAME (var), type);
933
934 /* Remember what variable this field was created for. This does have a
935 side effect of making dwarf2out ignore this member, so for helpful
936 debugging we clear it later in delete_omp_context. */
937 DECL_ABSTRACT_ORIGIN (field) = var;
938 if (type == TREE_TYPE (var))
939 {
940 DECL_ALIGN (field) = DECL_ALIGN (var);
941 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
942 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
943 }
944 else
945 DECL_ALIGN (field) = TYPE_ALIGN (type);
946
947 if ((mask & 3) == 3)
948 {
949 insert_field_into_struct (ctx->record_type, field);
950 if (ctx->srecord_type)
951 {
952 sfield = build_decl (FIELD_DECL, DECL_NAME (var), type);
953 DECL_ABSTRACT_ORIGIN (sfield) = var;
954 DECL_ALIGN (sfield) = DECL_ALIGN (field);
955 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
956 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
957 insert_field_into_struct (ctx->srecord_type, sfield);
958 }
959 }
960 else
961 {
962 if (ctx->srecord_type == NULL_TREE)
963 {
964 tree t;
965
966 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
967 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
968 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
969 {
970 sfield = build_decl (FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
971 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
972 insert_field_into_struct (ctx->srecord_type, sfield);
973 splay_tree_insert (ctx->sfield_map,
974 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
975 (splay_tree_value) sfield);
976 }
977 }
978 sfield = field;
979 insert_field_into_struct ((mask & 1) ? ctx->record_type
980 : ctx->srecord_type, field);
981 }
982
983 if (mask & 1)
984 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
985 (splay_tree_value) field);
986 if ((mask & 2) && ctx->sfield_map)
987 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
988 (splay_tree_value) sfield);
989 }
990
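/* Illustrative uses of the MASK argument to install_var_field above (cf.
   scan_sharing_clauses below): a shared variable in a parallel gets
   install_var_field (decl, by_ref, 3, ctx), one field visible to both
   sender and receiver; a task firstprivate variable gets a receiver
   field with mask 1 and, when a sender copy is needed, a separate
   sender field with mask 2.  */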
991 static tree
992 install_var_local (tree var, omp_context *ctx)
993 {
994 tree new_var = omp_copy_decl_1 (var, ctx);
995 insert_decl_map (&ctx->cb, var, new_var);
996 return new_var;
997 }
998
999 /* Adjust the replacement for DECL in CTX for the new context. This means
1000 copying the DECL_VALUE_EXPR, and fixing up the type. */
1001
1002 static void
1003 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1004 {
1005 tree new_decl, size;
1006
1007 new_decl = lookup_decl (decl, ctx);
1008
1009 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1010
1011 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1012 && DECL_HAS_VALUE_EXPR_P (decl))
1013 {
1014 tree ve = DECL_VALUE_EXPR (decl);
1015 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1016 SET_DECL_VALUE_EXPR (new_decl, ve);
1017 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1018 }
1019
1020 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1021 {
1022 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1023 if (size == error_mark_node)
1024 size = TYPE_SIZE (TREE_TYPE (new_decl));
1025 DECL_SIZE (new_decl) = size;
1026
1027 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1028 if (size == error_mark_node)
1029 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1030 DECL_SIZE_UNIT (new_decl) = size;
1031 }
1032 }
1033
1034 /* The callback for remap_decl. Search all containing contexts for a
1035 mapping of the variable; this avoids having to duplicate the splay
1036 tree ahead of time. We know a mapping doesn't already exist in the
1037 given context. Create new mappings to implement default semantics. */
1038
1039 static tree
1040 omp_copy_decl (tree var, copy_body_data *cb)
1041 {
1042 omp_context *ctx = (omp_context *) cb;
1043 tree new_var;
1044
1045 if (TREE_CODE (var) == LABEL_DECL)
1046 {
1047 new_var = create_artificial_label ();
1048 DECL_CONTEXT (new_var) = current_function_decl;
1049 insert_decl_map (&ctx->cb, var, new_var);
1050 return new_var;
1051 }
1052
1053 while (!is_taskreg_ctx (ctx))
1054 {
1055 ctx = ctx->outer;
1056 if (ctx == NULL)
1057 return var;
1058 new_var = maybe_lookup_decl (var, ctx);
1059 if (new_var)
1060 return new_var;
1061 }
1062
1063 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1064 return var;
1065
1066 return error_mark_node;
1067 }
1068
1069
1070 /* Return the parallel region associated with STMT. */
1071
1072 /* Debugging dumps for parallel regions. */
1073 void dump_omp_region (FILE *, struct omp_region *, int);
1074 void debug_omp_region (struct omp_region *);
1075 void debug_all_omp_regions (void);
1076
1077 /* Dump the parallel region tree rooted at REGION. */
1078
1079 void
1080 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1081 {
1082 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1083 gimple_code_name[region->type]);
1084
1085 if (region->inner)
1086 dump_omp_region (file, region->inner, indent + 4);
1087
1088 if (region->cont)
1089 {
1090 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1091 region->cont->index);
1092 }
1093
1094 if (region->exit)
1095 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1096 region->exit->index);
1097 else
1098 fprintf (file, "%*s[no exit marker]\n", indent, "");
1099
1100 if (region->next)
1101 dump_omp_region (file, region->next, indent);
1102 }
1103
1104 void
1105 debug_omp_region (struct omp_region *region)
1106 {
1107 dump_omp_region (stderr, region, 0);
1108 }
1109
1110 void
1111 debug_all_omp_regions (void)
1112 {
1113 dump_omp_region (stderr, root_omp_region, 0);
1114 }
1115
1116
1117 /* Create a new parallel region starting at STMT inside region PARENT. */
1118
1119 struct omp_region *
1120 new_omp_region (basic_block bb, enum gimple_code type,
1121 struct omp_region *parent)
1122 {
1123 struct omp_region *region = XCNEW (struct omp_region);
1124
1125 region->outer = parent;
1126 region->entry = bb;
1127 region->type = type;
1128
1129 if (parent)
1130 {
1131 /* This is a nested region. Add it to the list of inner
1132 regions in PARENT. */
1133 region->next = parent->inner;
1134 parent->inner = region;
1135 }
1136 else
1137 {
1138 /* This is a toplevel region. Add it to the list of toplevel
1139 regions in ROOT_OMP_REGION. */
1140 region->next = root_omp_region;
1141 root_omp_region = region;
1142 }
1143
1144 return region;
1145 }
1146
1147 /* Release the memory associated with the region tree rooted at REGION. */
1148
1149 static void
1150 free_omp_region_1 (struct omp_region *region)
1151 {
1152 struct omp_region *i, *n;
1153
1154 for (i = region->inner; i ; i = n)
1155 {
1156 n = i->next;
1157 free_omp_region_1 (i);
1158 }
1159
1160 free (region);
1161 }
1162
1163 /* Release the memory for the entire omp region tree. */
1164
1165 void
1166 free_omp_regions (void)
1167 {
1168 struct omp_region *r, *n;
1169 for (r = root_omp_region; r ; r = n)
1170 {
1171 n = r->next;
1172 free_omp_region_1 (r);
1173 }
1174 root_omp_region = NULL;
1175 }
1176
1177
1178 /* Create a new context, with OUTER_CTX being the surrounding context. */
1179
1180 static omp_context *
1181 new_omp_context (gimple stmt, omp_context *outer_ctx)
1182 {
1183 omp_context *ctx = XCNEW (omp_context);
1184
1185 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1186 (splay_tree_value) ctx);
1187 ctx->stmt = stmt;
1188
1189 if (outer_ctx)
1190 {
1191 ctx->outer = outer_ctx;
1192 ctx->cb = outer_ctx->cb;
1193 ctx->cb.block = NULL;
1194 ctx->depth = outer_ctx->depth + 1;
1195 }
1196 else
1197 {
1198 ctx->cb.src_fn = current_function_decl;
1199 ctx->cb.dst_fn = current_function_decl;
1200 ctx->cb.src_node = cgraph_node (current_function_decl);
1201 ctx->cb.dst_node = ctx->cb.src_node;
1202 ctx->cb.src_cfun = cfun;
1203 ctx->cb.copy_decl = omp_copy_decl;
1204 ctx->cb.eh_region = -1;
1205 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1206 ctx->depth = 1;
1207 }
1208
1209 ctx->cb.decl_map = pointer_map_create ();
1210
1211 return ctx;
1212 }
1213
1214 static gimple_seq maybe_catch_exception (gimple_seq);
1215
1216 /* Finalize task copyfn. */
1217
1218 static void
1219 finalize_task_copyfn (gimple task_stmt)
1220 {
1221 struct function *child_cfun;
1222 tree child_fn, old_fn;
1223 gimple_seq seq, new_seq;
1224 gimple bind;
1225
1226 child_fn = gimple_omp_task_copy_fn (task_stmt);
1227 if (child_fn == NULL_TREE)
1228 return;
1229
1230 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1231
1232 /* Inform the callgraph about the new function. */
1233 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1234 = cfun->curr_properties;
1235
1236 old_fn = current_function_decl;
1237 push_cfun (child_cfun);
1238 current_function_decl = child_fn;
1239 bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1240 seq = gimple_seq_alloc ();
1241 gimple_seq_add_stmt (&seq, bind);
1242 new_seq = maybe_catch_exception (seq);
1243 if (new_seq != seq)
1244 {
1245 bind = gimple_build_bind (NULL, new_seq, NULL);
1246 seq = gimple_seq_alloc ();
1247 gimple_seq_add_stmt (&seq, bind);
1248 }
1249 gimple_set_body (child_fn, seq);
1250 pop_cfun ();
1251 current_function_decl = old_fn;
1252
1253 cgraph_add_new_function (child_fn, false);
1254 }
1255
1256 /* Destroy an omp_context data structure. Called through the splay tree
1257 value delete callback. */
1258
1259 static void
1260 delete_omp_context (splay_tree_value value)
1261 {
1262 omp_context *ctx = (omp_context *) value;
1263
1264 pointer_map_destroy (ctx->cb.decl_map);
1265
1266 if (ctx->field_map)
1267 splay_tree_delete (ctx->field_map);
1268 if (ctx->sfield_map)
1269 splay_tree_delete (ctx->sfield_map);
1270
1271 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1272 it produces corrupt debug information. */
1273 if (ctx->record_type)
1274 {
1275 tree t;
1276 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1277 DECL_ABSTRACT_ORIGIN (t) = NULL;
1278 }
1279 if (ctx->srecord_type)
1280 {
1281 tree t;
1282 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
1283 DECL_ABSTRACT_ORIGIN (t) = NULL;
1284 }
1285
1286 if (is_task_ctx (ctx))
1287 finalize_task_copyfn (ctx->stmt);
1288
1289 XDELETE (ctx);
1290 }
1291
1292 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1293 context. */
1294
1295 static void
1296 fixup_child_record_type (omp_context *ctx)
1297 {
1298 tree f, type = ctx->record_type;
1299
1300 /* ??? It isn't sufficient to just call remap_type here, because
1301 variably_modified_type_p doesn't work the way we expect for
1302 record types. Testing each field for whether it needs remapping
1303 and creating a new record by hand works, however. */
1304 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
1305 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1306 break;
1307 if (f)
1308 {
1309 tree name, new_fields = NULL;
1310
1311 type = lang_hooks.types.make_type (RECORD_TYPE);
1312 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1313 name = build_decl (TYPE_DECL, name, type);
1314 TYPE_NAME (type) = name;
1315
1316 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
1317 {
1318 tree new_f = copy_node (f);
1319 DECL_CONTEXT (new_f) = type;
1320 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1321 TREE_CHAIN (new_f) = new_fields;
1322 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1323 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1324 &ctx->cb, NULL);
1325 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1326 &ctx->cb, NULL);
1327 new_fields = new_f;
1328
1329 /* Arrange to be able to look up the receiver field
1330 given the sender field. */
1331 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1332 (splay_tree_value) new_f);
1333 }
1334 TYPE_FIELDS (type) = nreverse (new_fields);
1335 layout_type (type);
1336 }
1337
1338 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1339 }
1340
1341 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1342 specified by CLAUSES. */
1343
1344 static void
1345 scan_sharing_clauses (tree clauses, omp_context *ctx)
1346 {
1347 tree c, decl;
1348 bool scan_array_reductions = false;
1349
1350 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1351 {
1352 bool by_ref;
1353
1354 switch (OMP_CLAUSE_CODE (c))
1355 {
1356 case OMP_CLAUSE_PRIVATE:
1357 decl = OMP_CLAUSE_DECL (c);
1358 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1359 goto do_private;
1360 else if (!is_variable_sized (decl))
1361 install_var_local (decl, ctx);
1362 break;
1363
1364 case OMP_CLAUSE_SHARED:
1365 gcc_assert (is_taskreg_ctx (ctx));
1366 decl = OMP_CLAUSE_DECL (c);
1367 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1368 || !is_variable_sized (decl));
1369 /* Global variables don't need to be copied,
1370 the receiver side will use them directly. */
1371 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1372 break;
1373 by_ref = use_pointer_for_field (decl, ctx);
1374 if (! TREE_READONLY (decl)
1375 || TREE_ADDRESSABLE (decl)
1376 || by_ref
1377 || is_reference (decl))
1378 {
1379 install_var_field (decl, by_ref, 3, ctx);
1380 install_var_local (decl, ctx);
1381 break;
1382 }
1383 /* We don't need to copy const scalar vars back. */
1384 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1385 goto do_private;
1386
1387 case OMP_CLAUSE_LASTPRIVATE:
1388 /* Let the corresponding firstprivate clause create
1389 the variable. */
1390 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1391 break;
1392 /* FALLTHRU */
1393
1394 case OMP_CLAUSE_FIRSTPRIVATE:
1395 case OMP_CLAUSE_REDUCTION:
1396 decl = OMP_CLAUSE_DECL (c);
1397 do_private:
1398 if (is_variable_sized (decl))
1399 {
1400 if (is_task_ctx (ctx))
1401 install_var_field (decl, false, 1, ctx);
1402 break;
1403 }
1404 else if (is_taskreg_ctx (ctx))
1405 {
1406 bool global
1407 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1408 by_ref = use_pointer_for_field (decl, NULL);
1409
1410 if (is_task_ctx (ctx)
1411 && (global || by_ref || is_reference (decl)))
1412 {
1413 install_var_field (decl, false, 1, ctx);
1414 if (!global)
1415 install_var_field (decl, by_ref, 2, ctx);
1416 }
1417 else if (!global)
1418 install_var_field (decl, by_ref, 3, ctx);
1419 }
1420 install_var_local (decl, ctx);
1421 break;
1422
1423 case OMP_CLAUSE_COPYPRIVATE:
1424 if (ctx->outer)
1425 scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
1426 /* FALLTHRU */
1427
1428 case OMP_CLAUSE_COPYIN:
1429 decl = OMP_CLAUSE_DECL (c);
1430 by_ref = use_pointer_for_field (decl, NULL);
1431 install_var_field (decl, by_ref, 3, ctx);
1432 break;
1433
1434 case OMP_CLAUSE_DEFAULT:
1435 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1436 break;
1437
1438 case OMP_CLAUSE_IF:
1439 case OMP_CLAUSE_NUM_THREADS:
1440 case OMP_CLAUSE_SCHEDULE:
1441 if (ctx->outer)
1442 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1443 break;
1444
1445 case OMP_CLAUSE_NOWAIT:
1446 case OMP_CLAUSE_ORDERED:
1447 case OMP_CLAUSE_COLLAPSE:
1448 case OMP_CLAUSE_UNTIED:
1449 break;
1450
1451 default:
1452 gcc_unreachable ();
1453 }
1454 }
1455
1456 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1457 {
1458 switch (OMP_CLAUSE_CODE (c))
1459 {
1460 case OMP_CLAUSE_LASTPRIVATE:
1461 /* Let the corresponding firstprivate clause create
1462 the variable. */
1463 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1464 scan_array_reductions = true;
1465 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1466 break;
1467 /* FALLTHRU */
1468
1469 case OMP_CLAUSE_PRIVATE:
1470 case OMP_CLAUSE_FIRSTPRIVATE:
1471 case OMP_CLAUSE_REDUCTION:
1472 decl = OMP_CLAUSE_DECL (c);
1473 if (is_variable_sized (decl))
1474 install_var_local (decl, ctx);
1475 fixup_remapped_decl (decl, ctx,
1476 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1477 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1478 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1479 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1480 scan_array_reductions = true;
1481 break;
1482
1483 case OMP_CLAUSE_SHARED:
1484 decl = OMP_CLAUSE_DECL (c);
1485 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1486 fixup_remapped_decl (decl, ctx, false);
1487 break;
1488
1489 case OMP_CLAUSE_COPYPRIVATE:
1490 case OMP_CLAUSE_COPYIN:
1491 case OMP_CLAUSE_DEFAULT:
1492 case OMP_CLAUSE_IF:
1493 case OMP_CLAUSE_NUM_THREADS:
1494 case OMP_CLAUSE_SCHEDULE:
1495 case OMP_CLAUSE_NOWAIT:
1496 case OMP_CLAUSE_ORDERED:
1497 case OMP_CLAUSE_COLLAPSE:
1498 case OMP_CLAUSE_UNTIED:
1499 break;
1500
1501 default:
1502 gcc_unreachable ();
1503 }
1504 }
1505
1506 if (scan_array_reductions)
1507 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1508 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1509 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1510 {
1511 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1512 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1513 }
1514 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1515 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1516 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1517 }
1518
1519 /* Create a new name for the omp child function. Returns an identifier. */
1520
1521 static GTY(()) unsigned int tmp_ompfn_id_num;
1522
1523 static tree
1524 create_omp_child_function_name (bool task_copy)
1525 {
1526 tree name = DECL_ASSEMBLER_NAME (current_function_decl);
1527 size_t len = IDENTIFIER_LENGTH (name);
1528 char *tmp_name, *prefix;
1529 const char *suffix;
1530
1531 suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
1532 prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
1533 memcpy (prefix, IDENTIFIER_POINTER (name), len);
1534 strcpy (prefix + len, suffix);
1535 #ifndef NO_DOT_IN_LABEL
1536 prefix[len] = '.';
1537 #elif !defined NO_DOLLAR_IN_LABEL
1538 prefix[len] = '$';
1539 #endif
1540 ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
1541 return get_identifier (tmp_name);
1542 }
1543
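/* For example, with the default ASM_FORMAT_PRIVATE_NAME the first child
   function created for a function "foo" is typically named "foo.omp_fn.0"
   (compare the bar.omp_fn.0 example earlier in this file), and a task
   copy function gets the form "foo.omp_cpyfn.N"; the exact spelling is
   target dependent.  */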
1544 /* Build a decl for the omp child function. It'll not contain a body
1545 yet, just the bare decl. */
1546
1547 static void
1548 create_omp_child_function (omp_context *ctx, bool task_copy)
1549 {
1550 tree decl, type, name, t;
1551
1552 name = create_omp_child_function_name (task_copy);
1553 if (task_copy)
1554 type = build_function_type_list (void_type_node, ptr_type_node,
1555 ptr_type_node, NULL_TREE);
1556 else
1557 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1558
1559 decl = build_decl (FUNCTION_DECL, name, type);
1560 decl = lang_hooks.decls.pushdecl (decl);
1561
1562 if (!task_copy)
1563 ctx->cb.dst_fn = decl;
1564 else
1565 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1566
1567 TREE_STATIC (decl) = 1;
1568 TREE_USED (decl) = 1;
1569 DECL_ARTIFICIAL (decl) = 1;
1570 DECL_IGNORED_P (decl) = 0;
1571 TREE_PUBLIC (decl) = 0;
1572 DECL_UNINLINABLE (decl) = 1;
1573 DECL_EXTERNAL (decl) = 0;
1574 DECL_CONTEXT (decl) = NULL_TREE;
1575 DECL_INITIAL (decl) = make_node (BLOCK);
1576
1577 t = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
1578 DECL_ARTIFICIAL (t) = 1;
1579 DECL_IGNORED_P (t) = 1;
1580 DECL_RESULT (decl) = t;
1581
1582 t = build_decl (PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1583 DECL_ARTIFICIAL (t) = 1;
1584 DECL_ARG_TYPE (t) = ptr_type_node;
1585 DECL_CONTEXT (t) = current_function_decl;
1586 TREE_USED (t) = 1;
1587 DECL_ARGUMENTS (decl) = t;
1588 if (!task_copy)
1589 ctx->receiver_decl = t;
1590 else
1591 {
1592 t = build_decl (PARM_DECL, get_identifier (".omp_data_o"),
1593 ptr_type_node);
1594 DECL_ARTIFICIAL (t) = 1;
1595 DECL_ARG_TYPE (t) = ptr_type_node;
1596 DECL_CONTEXT (t) = current_function_decl;
1597 TREE_USED (t) = 1;
1598 TREE_ADDRESSABLE (t) = 1;
1599 TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
1600 DECL_ARGUMENTS (decl) = t;
1601 }
1602
1603 /* Allocate memory for the function structure. The call to
1604 allocate_struct_function clobbers CFUN, so we need to restore
1605 it afterward. */
1606 push_struct_function (decl);
1607 DECL_SOURCE_LOCATION (decl) = gimple_location (ctx->stmt);
1608 cfun->function_end_locus = gimple_location (ctx->stmt);
1609 pop_cfun ();
1610 }
1611
1612
1613 /* Scan an OpenMP parallel directive. */
1614
1615 static void
1616 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1617 {
1618 omp_context *ctx;
1619 tree name;
1620 gimple stmt = gsi_stmt (*gsi);
1621
1622 /* Ignore parallel directives with empty bodies, unless there
1623 are copyin clauses. */
1624 if (optimize > 0
1625 && empty_body_p (gimple_omp_body (stmt))
1626 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1627 OMP_CLAUSE_COPYIN) == NULL)
1628 {
1629 gsi_replace (gsi, gimple_build_nop (), false);
1630 return;
1631 }
1632
1633 ctx = new_omp_context (stmt, outer_ctx);
1634 if (taskreg_nesting_level > 1)
1635 ctx->is_nested = true;
1636 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1637 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1638 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1639 name = create_tmp_var_name (".omp_data_s");
1640 name = build_decl (TYPE_DECL, name, ctx->record_type);
1641 TYPE_NAME (ctx->record_type) = name;
1642 create_omp_child_function (ctx, false);
1643 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1644
1645 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1646 scan_omp (gimple_omp_body (stmt), ctx);
1647
1648 if (TYPE_FIELDS (ctx->record_type) == NULL)
1649 ctx->record_type = ctx->receiver_decl = NULL;
1650 else
1651 {
1652 layout_type (ctx->record_type);
1653 fixup_child_record_type (ctx);
1654 }
1655 }
1656
1657 /* Scan an OpenMP task directive. */
1658
1659 static void
1660 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1661 {
1662 omp_context *ctx;
1663 tree name, t;
1664 gimple stmt = gsi_stmt (*gsi);
1665
1666 /* Ignore task directives with empty bodies. */
1667 if (optimize > 0
1668 && empty_body_p (gimple_omp_body (stmt)))
1669 {
1670 gsi_replace (gsi, gimple_build_nop (), false);
1671 return;
1672 }
1673
1674 ctx = new_omp_context (stmt, outer_ctx);
1675 if (taskreg_nesting_level > 1)
1676 ctx->is_nested = true;
1677 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1678 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1679 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1680 name = create_tmp_var_name (".omp_data_s");
1681 name = build_decl (TYPE_DECL, name, ctx->record_type);
1682 TYPE_NAME (ctx->record_type) = name;
1683 create_omp_child_function (ctx, false);
1684 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1685
1686 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1687
1688 if (ctx->srecord_type)
1689 {
1690 name = create_tmp_var_name (".omp_data_a");
1691 name = build_decl (TYPE_DECL, name, ctx->srecord_type);
1692 TYPE_NAME (ctx->srecord_type) = name;
1693 create_omp_child_function (ctx, true);
1694 }
1695
1696 scan_omp (gimple_omp_body (stmt), ctx);
1697
1698 if (TYPE_FIELDS (ctx->record_type) == NULL)
1699 {
1700 ctx->record_type = ctx->receiver_decl = NULL;
1701 t = build_int_cst (long_integer_type_node, 0);
1702 gimple_omp_task_set_arg_size (stmt, t);
1703 t = build_int_cst (long_integer_type_node, 1);
1704 gimple_omp_task_set_arg_align (stmt, t);
1705 }
1706 else
1707 {
1708 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1709 /* Move VLA fields to the end. */
1710 p = &TYPE_FIELDS (ctx->record_type);
1711 while (*p)
1712 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1713 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1714 {
1715 *q = *p;
1716 *p = TREE_CHAIN (*p);
1717 TREE_CHAIN (*q) = NULL_TREE;
1718 q = &TREE_CHAIN (*q);
1719 }
1720 else
1721 p = &TREE_CHAIN (*p);
1722 *p = vla_fields;
1723 layout_type (ctx->record_type);
1724 fixup_child_record_type (ctx);
1725 if (ctx->srecord_type)
1726 layout_type (ctx->srecord_type);
1727 t = fold_convert (long_integer_type_node,
1728 TYPE_SIZE_UNIT (ctx->record_type));
1729 gimple_omp_task_set_arg_size (stmt, t);
1730 t = build_int_cst (long_integer_type_node,
1731 TYPE_ALIGN_UNIT (ctx->record_type));
1732 gimple_omp_task_set_arg_align (stmt, t);
1733 }
1734 }
1735
1736
1737 /* Scan an OpenMP loop directive. */
1738
1739 static void
1740 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1741 {
1742 omp_context *ctx;
1743 size_t i;
1744
1745 ctx = new_omp_context (stmt, outer_ctx);
1746
1747 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1748
1749 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1750 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1751 {
1752 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1753 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1754 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1755 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1756 }
1757 scan_omp (gimple_omp_body (stmt), ctx);
1758 }
1759
1760 /* Scan an OpenMP sections directive. */
1761
1762 static void
1763 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1764 {
1765 omp_context *ctx;
1766
1767 ctx = new_omp_context (stmt, outer_ctx);
1768 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1769 scan_omp (gimple_omp_body (stmt), ctx);
1770 }
1771
1772 /* Scan an OpenMP single directive. */
1773
1774 static void
1775 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1776 {
1777 omp_context *ctx;
1778 tree name;
1779
1780 ctx = new_omp_context (stmt, outer_ctx);
1781 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1782 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1783 name = create_tmp_var_name (".omp_copy_s");
1784 name = build_decl (TYPE_DECL, name, ctx->record_type);
1785 TYPE_NAME (ctx->record_type) = name;
1786
1787 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1788 scan_omp (gimple_omp_body (stmt), ctx);
1789
1790 if (TYPE_FIELDS (ctx->record_type) == NULL)
1791 ctx->record_type = NULL;
1792 else
1793 layout_type (ctx->record_type);
1794 }
1795
1796
1797 /* Check OpenMP nesting restrictions. */
1798 static void
1799 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1800 {
1801 switch (gimple_code (stmt))
1802 {
1803 case GIMPLE_OMP_FOR:
1804 case GIMPLE_OMP_SECTIONS:
1805 case GIMPLE_OMP_SINGLE:
1806 case GIMPLE_CALL:
1807 for (; ctx != NULL; ctx = ctx->outer)
1808 switch (gimple_code (ctx->stmt))
1809 {
1810 case GIMPLE_OMP_FOR:
1811 case GIMPLE_OMP_SECTIONS:
1812 case GIMPLE_OMP_SINGLE:
1813 case GIMPLE_OMP_ORDERED:
1814 case GIMPLE_OMP_MASTER:
1815 case GIMPLE_OMP_TASK:
1816 if (is_gimple_call (stmt))
1817 {
1818 warning (0, "barrier region may not be closely nested inside "
1819 "of work-sharing, critical, ordered, master or "
1820 "explicit task region");
1821 return;
1822 }
1823 warning (0, "work-sharing region may not be closely nested inside "
1824 "of work-sharing, critical, ordered, master or explicit "
1825 "task region");
1826 return;
1827 case GIMPLE_OMP_PARALLEL:
1828 return;
1829 default:
1830 break;
1831 }
1832 break;
1833 case GIMPLE_OMP_MASTER:
1834 for (; ctx != NULL; ctx = ctx->outer)
1835 switch (gimple_code (ctx->stmt))
1836 {
1837 case GIMPLE_OMP_FOR:
1838 case GIMPLE_OMP_SECTIONS:
1839 case GIMPLE_OMP_SINGLE:
1840 case GIMPLE_OMP_TASK:
1841 warning (0, "master region may not be closely nested inside "
1842 "of work-sharing or explicit task region");
1843 return;
1844 case GIMPLE_OMP_PARALLEL:
1845 return;
1846 default:
1847 break;
1848 }
1849 break;
1850 case GIMPLE_OMP_ORDERED:
1851 for (; ctx != NULL; ctx = ctx->outer)
1852 switch (gimple_code (ctx->stmt))
1853 {
1854 case GIMPLE_OMP_CRITICAL:
1855 case GIMPLE_OMP_TASK:
1856 warning (0, "ordered region may not be closely nested inside "
1857 "of critical or explicit task region");
1858 return;
1859 case GIMPLE_OMP_FOR:
1860 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1861 OMP_CLAUSE_ORDERED) == NULL)
1862 warning (0, "ordered region must be closely nested inside "
1863 "a loop region with an ordered clause");
1864 return;
1865 case GIMPLE_OMP_PARALLEL:
1866 return;
1867 default:
1868 break;
1869 }
1870 break;
1871 case GIMPLE_OMP_CRITICAL:
1872 for (; ctx != NULL; ctx = ctx->outer)
1873 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1874 && (gimple_omp_critical_name (stmt)
1875 == gimple_omp_critical_name (ctx->stmt)))
1876 {
1877 warning (0, "critical region may not be nested inside a critical "
1878 "region with the same name");
1879 return;
1880 }
1881 break;
1882 default:
1883 break;
1884 }
1885 }
1886
1887
1888 /* Helper function for scan_omp.
1889
1890 Callback for walk_tree or for operands in walk_gimple_stmt, used to
1891 scan for OpenMP directives in *TP. */
1892
1893 static tree
1894 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1895 {
1896 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1897 omp_context *ctx = (omp_context *) wi->info;
1898 tree t = *tp;
1899
1900 switch (TREE_CODE (t))
1901 {
1902 case VAR_DECL:
1903 case PARM_DECL:
1904 case LABEL_DECL:
1905 case RESULT_DECL:
1906 if (ctx)
1907 *tp = remap_decl (t, &ctx->cb);
1908 break;
1909
1910 default:
1911 if (ctx && TYPE_P (t))
1912 *tp = remap_type (t, &ctx->cb);
1913 else if (!DECL_P (t))
1914 *walk_subtrees = 1;
1915 break;
1916 }
1917
1918 return NULL_TREE;
1919 }
1920
1921
1922 /* Helper function for scan_omp.
1923
1924 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1925 the current statement in GSI. */
1926
1927 static tree
1928 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1929 struct walk_stmt_info *wi)
1930 {
1931 gimple stmt = gsi_stmt (*gsi);
1932 omp_context *ctx = (omp_context *) wi->info;
1933
1934 if (gimple_has_location (stmt))
1935 input_location = gimple_location (stmt);
1936
1937 /* Check the OpenMP nesting restrictions. */
1938 if (ctx != NULL)
1939 {
1940 if (is_gimple_omp (stmt))
1941 check_omp_nesting_restrictions (stmt, ctx);
1942 else if (is_gimple_call (stmt))
1943 {
1944 tree fndecl = gimple_call_fndecl (stmt);
1945 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1946 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1947 check_omp_nesting_restrictions (stmt, ctx);
1948 }
1949 }
1950
1951 *handled_ops_p = true;
1952
1953 switch (gimple_code (stmt))
1954 {
1955 case GIMPLE_OMP_PARALLEL:
1956 taskreg_nesting_level++;
1957 scan_omp_parallel (gsi, ctx);
1958 taskreg_nesting_level--;
1959 break;
1960
1961 case GIMPLE_OMP_TASK:
1962 taskreg_nesting_level++;
1963 scan_omp_task (gsi, ctx);
1964 taskreg_nesting_level--;
1965 break;
1966
1967 case GIMPLE_OMP_FOR:
1968 scan_omp_for (stmt, ctx);
1969 break;
1970
1971 case GIMPLE_OMP_SECTIONS:
1972 scan_omp_sections (stmt, ctx);
1973 break;
1974
1975 case GIMPLE_OMP_SINGLE:
1976 scan_omp_single (stmt, ctx);
1977 break;
1978
1979 case GIMPLE_OMP_SECTION:
1980 case GIMPLE_OMP_MASTER:
1981 case GIMPLE_OMP_ORDERED:
1982 case GIMPLE_OMP_CRITICAL:
1983 ctx = new_omp_context (stmt, ctx);
1984 scan_omp (gimple_omp_body (stmt), ctx);
1985 break;
1986
1987 case GIMPLE_BIND:
1988 {
1989 tree var;
1990
1991 *handled_ops_p = false;
1992 if (ctx)
1993 for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
1994 insert_decl_map (&ctx->cb, var, var);
1995 }
1996 break;
1997 default:
1998 *handled_ops_p = false;
1999 break;
2000 }
2001
2002 return NULL_TREE;
2003 }
2004
2005
2006 /* Scan all the statements starting at the current statement. CTX
2007 contains context information about the OpenMP directives and
2008 clauses found during the scan. */
2009
2010 static void
2011 scan_omp (gimple_seq body, omp_context *ctx)
2012 {
2013 location_t saved_location;
2014 struct walk_stmt_info wi;
2015
2016 memset (&wi, 0, sizeof (wi));
2017 wi.info = ctx;
2018 wi.want_locations = true;
2019
2020 saved_location = input_location;
2021 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2022 input_location = saved_location;
2023 }
2024 \f
2025 /* Re-gimplification and code generation routines. */
2026
2027 /* Build a call to GOMP_barrier. */
2028
2029 static tree
2030 build_omp_barrier (void)
2031 {
2032 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2033 }
2034
2035 /* If a context was created for STMT when it was scanned, return it. */
2036
2037 static omp_context *
2038 maybe_lookup_ctx (gimple stmt)
2039 {
2040 splay_tree_node n;
2041 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2042 return n ? (omp_context *) n->value : NULL;
2043 }
2044
2045
2046 /* Find the mapping for DECL in CTX or the immediately enclosing
2047 context that has a mapping for DECL.
2048
2049 If CTX is a nested parallel directive, we may have to use the decl
2050 mappings created in CTX's parent context. Suppose that we have the
2051 following parallel nesting (variable UIDs shown for clarity):
2052
2053 iD.1562 = 0;
2054 #omp parallel shared(iD.1562) -> outer parallel
2055 iD.1562 = iD.1562 + 1;
2056
2057 #omp parallel shared (iD.1562) -> inner parallel
2058 iD.1562 = iD.1562 - 1;
2059
2060 Each parallel structure will create a distinct .omp_data_s structure
2061 for copying iD.1562 in/out of the directive:
2062
2063 outer parallel .omp_data_s.1.i -> iD.1562
2064 inner parallel .omp_data_s.2.i -> iD.1562
2065
2066 A shared variable mapping will produce a copy-out operation before
2067 the parallel directive and a copy-in operation after it. So, in
2068 this case we would have:
2069
2070 iD.1562 = 0;
2071 .omp_data_o.1.i = iD.1562;
2072 #omp parallel shared(iD.1562) -> outer parallel
2073 .omp_data_i.1 = &.omp_data_o.1
2074 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2075
2076 .omp_data_o.2.i = iD.1562; -> **
2077 #omp parallel shared(iD.1562) -> inner parallel
2078 .omp_data_i.2 = &.omp_data_o.2
2079 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2080
2081
2082 ** This is a problem. The symbol iD.1562 cannot be referenced
2083 inside the body of the outer parallel region. But since we are
2084 emitting this copy operation while expanding the inner parallel
2085 directive, we need to access the CTX structure of the outer
2086 parallel directive to get the correct mapping:
2087
2088 .omp_data_o.2.i = .omp_data_i.1->i
2089
2090 Since there may be other workshare or parallel directives enclosing
2091 the parallel directive, it may be necessary to walk up the context
2092 parent chain. This is not a problem in general because nested
2093 parallelism happens only rarely. */
2094
2095 static tree
2096 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2097 {
2098 tree t;
2099 omp_context *up;
2100
2101 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2102 t = maybe_lookup_decl (decl, up);
2103
2104 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2105
2106 return t ? t : decl;
2107 }
2108
2109
2110 /* Similar to lookup_decl_in_outer_ctx, but do not assert that a mapping
2111 exists; simply return DECL if it is not found in any outer context. */
2112
2113 static tree
2114 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2115 {
2116 tree t = NULL;
2117 omp_context *up;
2118
2119 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2120 t = maybe_lookup_decl (decl, up);
2121
2122 return t ? t : decl;
2123 }
2124
2125
2126 /* Construct the initialization value for reduction CLAUSE. */
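/* For example, with an int TYPE the identity value produced below is 0
   for +, -, |, ^, || and !=, 1 for *, && and ==, ~0 for &, INT_MIN for
   max and INT_MAX for min; for floating-point max/min it is -Inf/+Inf
   when infinities are honored, otherwise the most negative/positive
   finite value.  */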
2127
2128 tree
2129 omp_reduction_init (tree clause, tree type)
2130 {
2131 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2132 {
2133 case PLUS_EXPR:
2134 case MINUS_EXPR:
2135 case BIT_IOR_EXPR:
2136 case BIT_XOR_EXPR:
2137 case TRUTH_OR_EXPR:
2138 case TRUTH_ORIF_EXPR:
2139 case TRUTH_XOR_EXPR:
2140 case NE_EXPR:
2141 return fold_convert (type, integer_zero_node);
2142
2143 case MULT_EXPR:
2144 case TRUTH_AND_EXPR:
2145 case TRUTH_ANDIF_EXPR:
2146 case EQ_EXPR:
2147 return fold_convert (type, integer_one_node);
2148
2149 case BIT_AND_EXPR:
2150 return fold_convert (type, integer_minus_one_node);
2151
2152 case MAX_EXPR:
2153 if (SCALAR_FLOAT_TYPE_P (type))
2154 {
2155 REAL_VALUE_TYPE max, min;
2156 if (HONOR_INFINITIES (TYPE_MODE (type)))
2157 {
2158 real_inf (&max);
2159 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2160 }
2161 else
2162 real_maxval (&min, 1, TYPE_MODE (type));
2163 return build_real (type, min);
2164 }
2165 else
2166 {
2167 gcc_assert (INTEGRAL_TYPE_P (type));
2168 return TYPE_MIN_VALUE (type);
2169 }
2170
2171 case MIN_EXPR:
2172 if (SCALAR_FLOAT_TYPE_P (type))
2173 {
2174 REAL_VALUE_TYPE max;
2175 if (HONOR_INFINITIES (TYPE_MODE (type)))
2176 real_inf (&max);
2177 else
2178 real_maxval (&max, 0, TYPE_MODE (type));
2179 return build_real (type, max);
2180 }
2181 else
2182 {
2183 gcc_assert (INTEGRAL_TYPE_P (type));
2184 return TYPE_MAX_VALUE (type);
2185 }
2186
2187 default:
2188 gcc_unreachable ();
2189 }
2190 }
2191
2192 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2193 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2194 private variables. Initialization statements go in ILIST, while calls
2195 to destructors go in DLIST. */
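/* As a rough sketch (illustrative names), for
   "#pragma omp parallel firstprivate (x)" the receiver-side ILIST built
   here ends up containing approximately

       x = .omp_data_i->x;   // copy-construct the private copy

   and, when the type has a destructor, the matching destructor call for
   the private copy is queued on DLIST.  */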
2196
2197 static void
2198 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2199 omp_context *ctx)
2200 {
2201 gimple_stmt_iterator diter;
2202 tree c, dtor, copyin_seq, x, ptr;
2203 bool copyin_by_ref = false;
2204 bool lastprivate_firstprivate = false;
2205 int pass;
2206
2207 *dlist = gimple_seq_alloc ();
2208 diter = gsi_start (*dlist);
2209 copyin_seq = NULL;
2210
2211 /* Do all the fixed sized types in the first pass, and the variable sized
2212 types in the second pass. This makes sure that the scalar arguments to
2213 the variable sized types are processed before we use them in the
2214 variable sized operations. */
2215 for (pass = 0; pass < 2; ++pass)
2216 {
2217 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2218 {
2219 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2220 tree var, new_var;
2221 bool by_ref;
2222
2223 switch (c_kind)
2224 {
2225 case OMP_CLAUSE_PRIVATE:
2226 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2227 continue;
2228 break;
2229 case OMP_CLAUSE_SHARED:
2230 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2231 {
2232 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2233 continue;
2234 }
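/* FALLTHRU */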
2235 case OMP_CLAUSE_FIRSTPRIVATE:
2236 case OMP_CLAUSE_COPYIN:
2237 case OMP_CLAUSE_REDUCTION:
2238 break;
2239 case OMP_CLAUSE_LASTPRIVATE:
2240 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2241 {
2242 lastprivate_firstprivate = true;
2243 if (pass != 0)
2244 continue;
2245 }
2246 break;
2247 default:
2248 continue;
2249 }
2250
2251 new_var = var = OMP_CLAUSE_DECL (c);
2252 if (c_kind != OMP_CLAUSE_COPYIN)
2253 new_var = lookup_decl (var, ctx);
2254
2255 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2256 {
2257 if (pass != 0)
2258 continue;
2259 }
2260 else if (is_variable_sized (var))
2261 {
2262 /* For variable sized types, we need to allocate the
2263 actual storage here. Call alloca and store the
2264 result in the pointer decl that we created elsewhere. */
2265 if (pass == 0)
2266 continue;
2267
2268 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2269 {
2270 gimple stmt;
2271 tree tmp;
2272
2273 ptr = DECL_VALUE_EXPR (new_var);
2274 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2275 ptr = TREE_OPERAND (ptr, 0);
2276 gcc_assert (DECL_P (ptr));
2277 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2278
2279 /* void *tmp = __builtin_alloca */
2280 stmt
2281 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2282 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2283 gimple_add_tmp_var (tmp);
2284 gimple_call_set_lhs (stmt, tmp);
2285
2286 gimple_seq_add_stmt (ilist, stmt);
2287
2288 x = fold_convert (TREE_TYPE (ptr), tmp);
2289 gimplify_assign (ptr, x, ilist);
2290 }
2291 }
2292 else if (is_reference (var))
2293 {
2294 /* For references that are being privatized for Fortran,
2295 allocate new backing storage for the new pointer
2296 variable. This allows us to avoid changing all the
2297 code that expects a pointer to something that expects
2298 a direct variable. Note that this doesn't apply to
2299 C++, since reference types are disallowed in data
2300 sharing clauses there, except for NRV optimized
2301 return values. */
2302 if (pass == 0)
2303 continue;
2304
2305 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2306 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2307 {
2308 x = build_receiver_ref (var, false, ctx);
2309 x = build_fold_addr_expr (x);
2310 }
2311 else if (TREE_CONSTANT (x))
2312 {
2313 const char *name = NULL;
2314 if (DECL_NAME (var))
2315 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2316
2317 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2318 name);
2319 gimple_add_tmp_var (x);
2320 TREE_ADDRESSABLE (x) = 1;
2321 x = build_fold_addr_expr_with_type (x, TREE_TYPE (new_var));
2322 }
2323 else
2324 {
2325 x = build_call_expr (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2326 x = fold_convert (TREE_TYPE (new_var), x);
2327 }
2328
2329 gimplify_assign (new_var, x, ilist);
2330
2331 new_var = build_fold_indirect_ref (new_var);
2332 }
2333 else if (c_kind == OMP_CLAUSE_REDUCTION
2334 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2335 {
2336 if (pass == 0)
2337 continue;
2338 }
2339 else if (pass != 0)
2340 continue;
2341
2342 switch (OMP_CLAUSE_CODE (c))
2343 {
2344 case OMP_CLAUSE_SHARED:
2345 /* Shared global vars are just accessed directly. */
2346 if (is_global_var (new_var))
2347 break;
2348 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2349 needs to be delayed until after fixup_child_record_type so
2350 that we get the correct type during the dereference. */
2351 by_ref = use_pointer_for_field (var, ctx);
2352 x = build_receiver_ref (var, by_ref, ctx);
2353 SET_DECL_VALUE_EXPR (new_var, x);
2354 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2355
2356 /* ??? If VAR is not passed by reference, and the variable
2357 hasn't been initialized yet, then we'll get a warning for
2358 the store into the omp_data_s structure. Ideally, we'd be
2359 able to notice this and not store anything at all, but
2360 we're generating code too early. Suppress the warning. */
2361 if (!by_ref)
2362 TREE_NO_WARNING (var) = 1;
2363 break;
2364
2365 case OMP_CLAUSE_LASTPRIVATE:
2366 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2367 break;
2368 /* FALLTHRU */
2369
2370 case OMP_CLAUSE_PRIVATE:
2371 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2372 x = build_outer_var_ref (var, ctx);
2373 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2374 {
2375 if (is_task_ctx (ctx))
2376 x = build_receiver_ref (var, false, ctx);
2377 else
2378 x = build_outer_var_ref (var, ctx);
2379 }
2380 else
2381 x = NULL;
2382 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2383 if (x)
2384 gimplify_and_add (x, ilist);
2385 /* FALLTHRU */
2386
2387 do_dtor:
2388 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2389 if (x)
2390 {
2391 gimple_seq tseq = NULL;
2392
2393 dtor = x;
2394 gimplify_stmt (&dtor, &tseq);
2395 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2396 }
2397 break;
2398
2399 case OMP_CLAUSE_FIRSTPRIVATE:
2400 if (is_task_ctx (ctx))
2401 {
2402 if (is_reference (var) || is_variable_sized (var))
2403 goto do_dtor;
2404 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2405 ctx))
2406 || use_pointer_for_field (var, NULL))
2407 {
2408 x = build_receiver_ref (var, false, ctx);
2409 SET_DECL_VALUE_EXPR (new_var, x);
2410 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2411 goto do_dtor;
2412 }
2413 }
2414 x = build_outer_var_ref (var, ctx);
2415 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2416 gimplify_and_add (x, ilist);
2417 goto do_dtor;
2418 break;
2419
2420 case OMP_CLAUSE_COPYIN:
2421 by_ref = use_pointer_for_field (var, NULL);
2422 x = build_receiver_ref (var, by_ref, ctx);
2423 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2424 append_to_statement_list (x, &copyin_seq);
2425 copyin_by_ref |= by_ref;
2426 break;
2427
2428 case OMP_CLAUSE_REDUCTION:
2429 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2430 {
2431 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2432 x = build_outer_var_ref (var, ctx);
2433
2434 if (is_reference (var))
2435 x = build_fold_addr_expr (x);
2436 SET_DECL_VALUE_EXPR (placeholder, x);
2437 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2438 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2439 gimple_seq_add_seq (ilist,
2440 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2441 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2442 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2443 }
2444 else
2445 {
2446 x = omp_reduction_init (c, TREE_TYPE (new_var));
2447 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2448 gimplify_assign (new_var, x, ilist);
2449 }
2450 break;
2451
2452 default:
2453 gcc_unreachable ();
2454 }
2455 }
2456 }
2457
2458 /* The copyin sequence is not to be executed by the main thread, since
2459 that would result in self-copies. The self-copy might be harmless for
2460 scalars, but it certainly is not for C++ operator=. */
2461 if (copyin_seq)
2462 {
2463 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2464 x = build2 (NE_EXPR, boolean_type_node, x,
2465 build_int_cst (TREE_TYPE (x), 0));
2466 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2467 gimplify_and_add (x, ilist);
2468 }
2469
2470 /* If any copyin variable is passed by reference, we must ensure the
2471 master thread doesn't modify it before it is copied over in all
2472 threads. Similarly for variables in both firstprivate and
2473 lastprivate clauses we need to ensure the lastprivate copying
2474 happens after firstprivate copying in all threads. */
2475 if (copyin_by_ref || lastprivate_firstprivate)
2476 gimplify_and_add (build_omp_barrier (), ilist);
2477 }
2478
2479
2480 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2481 both parallel and workshare constructs. PREDICATE may be NULL if it's
2482 always true. */
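/* Conceptually, for "lastprivate (x)" the code appended to STMT_LIST is
   roughly

       if (PREDICATE)     // e.g. "this thread ran the sequentially last iteration"
         x = x.private;   // illustrative names; copy the private value back out

   with the conditional omitted when PREDICATE is NULL.  */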
2483
2484 static void
2485 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2486 omp_context *ctx)
2487 {
2488 tree x, c, label = NULL;
2489 bool par_clauses = false;
2490
2491 /* Early exit if there are no lastprivate clauses. */
2492 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2493 if (clauses == NULL)
2494 {
2495 /* If this was a workshare construct, see if it had been combined
2496 with its parallel. In that case, look for the clauses on the
2497 parallel statement itself. */
2498 if (is_parallel_ctx (ctx))
2499 return;
2500
2501 ctx = ctx->outer;
2502 if (ctx == NULL || !is_parallel_ctx (ctx))
2503 return;
2504
2505 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2506 OMP_CLAUSE_LASTPRIVATE);
2507 if (clauses == NULL)
2508 return;
2509 par_clauses = true;
2510 }
2511
2512 if (predicate)
2513 {
2514 gimple stmt;
2515 tree label_true, arm1, arm2;
2516
2517 label = create_artificial_label ();
2518 label_true = create_artificial_label ();
2519 arm1 = TREE_OPERAND (predicate, 0);
2520 arm2 = TREE_OPERAND (predicate, 1);
2521 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2522 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2523 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2524 label_true, label);
2525 gimple_seq_add_stmt (stmt_list, stmt);
2526 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2527 }
2528
2529 for (c = clauses; c ;)
2530 {
2531 tree var, new_var;
2532
2533 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2534 {
2535 var = OMP_CLAUSE_DECL (c);
2536 new_var = lookup_decl (var, ctx);
2537
2538 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2539 {
2540 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2541 gimple_seq_add_seq (stmt_list,
2542 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2543 }
2544 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2545
2546 x = build_outer_var_ref (var, ctx);
2547 if (is_reference (var))
2548 new_var = build_fold_indirect_ref (new_var);
2549 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2550 gimplify_and_add (x, stmt_list);
2551 }
2552 c = OMP_CLAUSE_CHAIN (c);
2553 if (c == NULL && !par_clauses)
2554 {
2555 /* If this was a workshare construct, see if it had been combined
2556 with its parallel. In that case, continue looking for the
2557 clauses also on the parallel statement itself. */
2558 if (is_parallel_ctx (ctx))
2559 break;
2560
2561 ctx = ctx->outer;
2562 if (ctx == NULL || !is_parallel_ctx (ctx))
2563 break;
2564
2565 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2566 OMP_CLAUSE_LASTPRIVATE);
2567 par_clauses = true;
2568 }
2569 }
2570
2571 if (label)
2572 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2573 }
2574
2575
2576 /* Generate code to implement the REDUCTION clauses. */
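/* For instance, a lone "reduction (+:sum)" clause is lowered to roughly

       #pragma omp atomic
       sum += sum.private;            // illustrative names

   whereas two or more reduction clauses (or a placeholder/array
   reduction) share one GOMP_atomic_start () / GOMP_atomic_end ()
   critical section around all the merge statements.  */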
2577
2578 static void
2579 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2580 {
2581 gimple_seq sub_seq = NULL;
2582 gimple stmt;
2583 tree x, c;
2584 int count = 0;
2585
2586 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2587 update in that case, otherwise use a lock. */
2588 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2589 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2590 {
2591 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2592 {
2593 /* Never use OMP_ATOMIC for array reductions. */
2594 count = -1;
2595 break;
2596 }
2597 count++;
2598 }
2599
2600 if (count == 0)
2601 return;
2602
2603 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2604 {
2605 tree var, ref, new_var;
2606 enum tree_code code;
2607
2608 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2609 continue;
2610
2611 var = OMP_CLAUSE_DECL (c);
2612 new_var = lookup_decl (var, ctx);
2613 if (is_reference (var))
2614 new_var = build_fold_indirect_ref (new_var);
2615 ref = build_outer_var_ref (var, ctx);
2616 code = OMP_CLAUSE_REDUCTION_CODE (c);
2617
2618 /* reduction(-:var) sums up the partial results, so it acts
2619 identically to reduction(+:var). */
2620 if (code == MINUS_EXPR)
2621 code = PLUS_EXPR;
2622
2623 if (count == 1)
2624 {
2625 tree addr = build_fold_addr_expr (ref);
2626
2627 addr = save_expr (addr);
2628 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2629 x = fold_build2 (code, TREE_TYPE (ref), ref, new_var);
2630 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2631 gimplify_and_add (x, stmt_seqp);
2632 return;
2633 }
2634
2635 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2636 {
2637 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2638
2639 if (is_reference (var))
2640 ref = build_fold_addr_expr (ref);
2641 SET_DECL_VALUE_EXPR (placeholder, ref);
2642 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2643 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2644 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2645 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2646 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2647 }
2648 else
2649 {
2650 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2651 ref = build_outer_var_ref (var, ctx);
2652 gimplify_assign (ref, x, &sub_seq);
2653 }
2654 }
2655
2656 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2657 gimple_seq_add_stmt (stmt_seqp, stmt);
2658
2659 gimple_seq_add_seq (stmt_seqp, sub_seq);
2660
2661 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2662 gimple_seq_add_stmt (stmt_seqp, stmt);
2663 }
2664
2665
2666 /* Generate code to implement the COPYPRIVATE clauses. */
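/* Roughly, for "#pragma omp single copyprivate (x)" the sender side
   (SLIST) stores x, or &x for by-reference fields, into the .omp_copy_s
   record, and the receiving threads (RLIST) then assign their own x
   from that record using the language-specific assignment operator.  */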
2667
2668 static void
2669 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2670 omp_context *ctx)
2671 {
2672 tree c;
2673
2674 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2675 {
2676 tree var, ref, x;
2677 bool by_ref;
2678
2679 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2680 continue;
2681
2682 var = OMP_CLAUSE_DECL (c);
2683 by_ref = use_pointer_for_field (var, NULL);
2684
2685 ref = build_sender_ref (var, ctx);
2686 x = lookup_decl_in_outer_ctx (var, ctx);
2687 x = by_ref ? build_fold_addr_expr (x) : x;
2688 gimplify_assign (ref, x, slist);
2689
2690 ref = build_receiver_ref (var, by_ref, ctx);
2691 if (is_reference (var))
2692 {
2693 ref = build_fold_indirect_ref (ref);
2694 var = build_fold_indirect_ref (var);
2695 }
2696 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
2697 gimplify_and_add (x, rlist);
2698 }
2699 }
2700
2701
2702 /* Generate code to implement the FIRSTPRIVATE, COPYIN, LASTPRIVATE and
2703 REDUCTION clauses from the sender (aka parent) side. */
2704
2705 static void
2706 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2707 omp_context *ctx)
2708 {
2709 tree c;
2710
2711 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2712 {
2713 tree val, ref, x, var;
2714 bool by_ref, do_in = false, do_out = false;
2715
2716 switch (OMP_CLAUSE_CODE (c))
2717 {
2718 case OMP_CLAUSE_PRIVATE:
2719 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2720 break;
2721 continue;
2722 case OMP_CLAUSE_FIRSTPRIVATE:
2723 case OMP_CLAUSE_COPYIN:
2724 case OMP_CLAUSE_LASTPRIVATE:
2725 case OMP_CLAUSE_REDUCTION:
2726 break;
2727 default:
2728 continue;
2729 }
2730
2731 val = OMP_CLAUSE_DECL (c);
2732 var = lookup_decl_in_outer_ctx (val, ctx);
2733
2734 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2735 && is_global_var (var))
2736 continue;
2737 if (is_variable_sized (val))
2738 continue;
2739 by_ref = use_pointer_for_field (val, NULL);
2740
2741 switch (OMP_CLAUSE_CODE (c))
2742 {
2743 case OMP_CLAUSE_PRIVATE:
2744 case OMP_CLAUSE_FIRSTPRIVATE:
2745 case OMP_CLAUSE_COPYIN:
2746 do_in = true;
2747 break;
2748
2749 case OMP_CLAUSE_LASTPRIVATE:
2750 if (by_ref || is_reference (val))
2751 {
2752 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2753 continue;
2754 do_in = true;
2755 }
2756 else
2757 {
2758 do_out = true;
2759 if (lang_hooks.decls.omp_private_outer_ref (val))
2760 do_in = true;
2761 }
2762 break;
2763
2764 case OMP_CLAUSE_REDUCTION:
2765 do_in = true;
2766 do_out = !(by_ref || is_reference (val));
2767 break;
2768
2769 default:
2770 gcc_unreachable ();
2771 }
2772
2773 if (do_in)
2774 {
2775 ref = build_sender_ref (val, ctx);
2776 x = by_ref ? build_fold_addr_expr (var) : var;
2777 gimplify_assign (ref, x, ilist);
2778 if (is_task_ctx (ctx))
2779 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2780 }
2781
2782 if (do_out)
2783 {
2784 ref = build_sender_ref (val, ctx);
2785 gimplify_assign (var, ref, olist);
2786 }
2787 }
2788 }
2789
2790 /* Generate code to implement SHARED from the sender (aka parent)
2791 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2792 list things that got automatically shared. */
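/* Schematically, for every field of the record whose variable is
   remapped in the child, ILIST gets either ".omp_data_o.x = &x" or
   ".omp_data_o.x = x" (illustrative names), and for writable by-value
   fields OLIST copies the possibly modified value back out after the
   region.  */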
2793
2794 static void
2795 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2796 {
2797 tree var, ovar, nvar, f, x, record_type;
2798
2799 if (ctx->record_type == NULL)
2800 return;
2801
2802 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2803 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
2804 {
2805 ovar = DECL_ABSTRACT_ORIGIN (f);
2806 nvar = maybe_lookup_decl (ovar, ctx);
2807 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2808 continue;
2809
2810 /* If CTX is a nested parallel directive, find the immediately
2811 enclosing parallel or workshare construct that contains a
2812 mapping for OVAR. */
2813 var = lookup_decl_in_outer_ctx (ovar, ctx);
2814
2815 if (use_pointer_for_field (ovar, ctx))
2816 {
2817 x = build_sender_ref (ovar, ctx);
2818 var = build_fold_addr_expr (var);
2819 gimplify_assign (x, var, ilist);
2820 }
2821 else
2822 {
2823 x = build_sender_ref (ovar, ctx);
2824 gimplify_assign (x, var, ilist);
2825
2826 if (!TREE_READONLY (var)
2827 /* We don't need to receive a new reference to a result
2828 or parm decl. In fact we may not store to it as we will
2829 invalidate any pending RSO and generate wrong gimple
2830 during inlining. */
2831 && !((TREE_CODE (var) == RESULT_DECL
2832 || TREE_CODE (var) == PARM_DECL)
2833 && DECL_BY_REFERENCE (var)))
2834 {
2835 x = build_sender_ref (ovar, ctx);
2836 gimplify_assign (var, x, olist);
2837 }
2838 }
2839 }
2840 }
2841
2842
2843 /* A convenience function to build an empty GIMPLE_COND with just the
2844 condition. */
2845
2846 static gimple
2847 gimple_build_cond_empty (tree cond)
2848 {
2849 enum tree_code pred_code;
2850 tree lhs, rhs;
2851
2852 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2853 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2854 }
2855
2856
2857 /* Build the function calls to GOMP_parallel_start etc to actually
2858 generate the parallel operation. REGION is the parallel region
2859 being expanded. BB is the block where to insert the code. WS_ARGS
2860 will be set if this is a call to a combined parallel+workshare
2861 construct, it contains the list of additional arguments needed by
2862 the workshare construct. */
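/* The sequence emitted at the end of BB amounts to approximately

       GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
       child_fn (&.omp_data_o);   // the encountering thread takes part too
       GOMP_parallel_end ();

   with the combined parallel+workshare variants of the start function
   receiving the extra WS_ARGS arguments.  */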
2863
2864 static void
2865 expand_parallel_call (struct omp_region *region, basic_block bb,
2866 gimple entry_stmt, tree ws_args)
2867 {
2868 tree t, t1, t2, val, cond, c, clauses;
2869 gimple_stmt_iterator gsi;
2870 gimple stmt;
2871 int start_ix;
2872
2873 clauses = gimple_omp_parallel_clauses (entry_stmt);
2874
2875 /* Determine what flavor of GOMP_parallel_start we will be
2876 emitting. */
2877 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2878 if (is_combined_parallel (region))
2879 {
2880 switch (region->inner->type)
2881 {
2882 case GIMPLE_OMP_FOR:
2883 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2884 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2885 + (region->inner->sched_kind
2886 == OMP_CLAUSE_SCHEDULE_RUNTIME
2887 ? 3 : region->inner->sched_kind);
2888 break;
2889 case GIMPLE_OMP_SECTIONS:
2890 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2891 break;
2892 default:
2893 gcc_unreachable ();
2894 }
2895 }
2896
2897 /* By default, the value of NUM_THREADS is zero (selected at run time)
2898 and there is no conditional. */
2899 cond = NULL_TREE;
2900 val = build_int_cst (unsigned_type_node, 0);
2901
2902 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2903 if (c)
2904 cond = OMP_CLAUSE_IF_EXPR (c);
2905
2906 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2907 if (c)
2908 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2909
2910 /* Ensure 'val' is of the correct type. */
2911 val = fold_convert (unsigned_type_node, val);
2912
2913 /* If we found the clause 'if (cond)', build either
2914 (cond == 0) or (cond ? val : 1u). */
2915 if (cond)
2916 {
2917 gimple_stmt_iterator gsi;
2918
2919 cond = gimple_boolify (cond);
2920
2921 if (integer_zerop (val))
2922 val = fold_build2 (EQ_EXPR, unsigned_type_node, cond,
2923 build_int_cst (TREE_TYPE (cond), 0));
2924 else
2925 {
2926 basic_block cond_bb, then_bb, else_bb;
2927 edge e, e_then, e_else;
2928 tree tmp_then, tmp_else, tmp_join, tmp_var;
2929
2930 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2931 if (gimple_in_ssa_p (cfun))
2932 {
2933 tmp_then = make_ssa_name (tmp_var, NULL);
2934 tmp_else = make_ssa_name (tmp_var, NULL);
2935 tmp_join = make_ssa_name (tmp_var, NULL);
2936 }
2937 else
2938 {
2939 tmp_then = tmp_var;
2940 tmp_else = tmp_var;
2941 tmp_join = tmp_var;
2942 }
2943
2944 e = split_block (bb, NULL);
2945 cond_bb = e->src;
2946 bb = e->dest;
2947 remove_edge (e);
2948
2949 then_bb = create_empty_bb (cond_bb);
2950 else_bb = create_empty_bb (then_bb);
2951 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2952 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2953
2954 stmt = gimple_build_cond_empty (cond);
2955 gsi = gsi_start_bb (cond_bb);
2956 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2957
2958 gsi = gsi_start_bb (then_bb);
2959 stmt = gimple_build_assign (tmp_then, val);
2960 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2961
2962 gsi = gsi_start_bb (else_bb);
2963 stmt = gimple_build_assign
2964 (tmp_else, build_int_cst (unsigned_type_node, 1));
2965 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2966
2967 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
2968 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
2969 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
2970 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
2971
2972 if (gimple_in_ssa_p (cfun))
2973 {
2974 gimple phi = create_phi_node (tmp_join, bb);
2975 SSA_NAME_DEF_STMT (tmp_join) = phi;
2976 add_phi_arg (phi, tmp_then, e_then);
2977 add_phi_arg (phi, tmp_else, e_else);
2978 }
2979
2980 val = tmp_join;
2981 }
2982
2983 gsi = gsi_start_bb (bb);
2984 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
2985 false, GSI_CONTINUE_LINKING);
2986 }
2987
2988 gsi = gsi_last_bb (bb);
2989 t = gimple_omp_parallel_data_arg (entry_stmt);
2990 if (t == NULL)
2991 t1 = null_pointer_node;
2992 else
2993 t1 = build_fold_addr_expr (t);
2994 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
2995
2996 if (ws_args)
2997 {
2998 tree args = tree_cons (NULL, t2,
2999 tree_cons (NULL, t1,
3000 tree_cons (NULL, val, ws_args)));
3001 t = build_function_call_expr (built_in_decls[start_ix], args);
3002 }
3003 else
3004 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
3005
3006 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3007 false, GSI_CONTINUE_LINKING);
3008
3009 t = gimple_omp_parallel_data_arg (entry_stmt);
3010 if (t == NULL)
3011 t = null_pointer_node;
3012 else
3013 t = build_fold_addr_expr (t);
3014 t = build_call_expr (gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3015 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3016 false, GSI_CONTINUE_LINKING);
3017
3018 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3019 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3020 false, GSI_CONTINUE_LINKING);
3021 }
3022
3023
3024 /* Build the function call to GOMP_task to actually
3025 generate the task operation. BB is the block where to insert the code. */
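/* The emitted call has roughly the shape

       GOMP_task (child_fn, &.omp_data_o, copy_fn,
                  arg_size, arg_align, if_cond, flags);

   where the data and copy-function arguments may be null pointers and
   FLAGS currently carries only the "untied" bit.  */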
3026
3027 static void
3028 expand_task_call (basic_block bb, gimple entry_stmt)
3029 {
3030 tree t, t1, t2, t3, flags, cond, c, clauses;
3031 gimple_stmt_iterator gsi;
3032
3033 clauses = gimple_omp_task_clauses (entry_stmt);
3034
3035 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3036 if (c)
3037 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3038 else
3039 cond = boolean_true_node;
3040
3041 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3042 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3043
3044 gsi = gsi_last_bb (bb);
3045 t = gimple_omp_task_data_arg (entry_stmt);
3046 if (t == NULL)
3047 t2 = null_pointer_node;
3048 else
3049 t2 = build_fold_addr_expr (t);
3050 t1 = build_fold_addr_expr (gimple_omp_task_child_fn (entry_stmt));
3051 t = gimple_omp_task_copy_fn (entry_stmt);
3052 if (t == NULL)
3053 t3 = null_pointer_node;
3054 else
3055 t3 = build_fold_addr_expr (t);
3056
3057 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3058 gimple_omp_task_arg_size (entry_stmt),
3059 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3060
3061 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3062 false, GSI_CONTINUE_LINKING);
3063 }
3064
3065
3066 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3067 catch handler and return it. This prevents programs from violating the
3068 structured block semantics with throws. */
3069
3070 static gimple_seq
3071 maybe_catch_exception (gimple_seq body)
3072 {
3073 gimple f, t;
3074
3075 if (!flag_exceptions)
3076 return body;
3077
3078 if (lang_protect_cleanup_actions)
3079 t = lang_protect_cleanup_actions ();
3080 else
3081 t = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
3082
3083 f = gimple_build_eh_filter (NULL, gimple_seq_alloc_with_stmt (t));
3084 gimple_eh_filter_set_must_not_throw (f, true);
3085
3086 t = gimple_build_try (body, gimple_seq_alloc_with_stmt (f),
3087 GIMPLE_TRY_CATCH);
3088
3089 return gimple_seq_alloc_with_stmt (t);
3090 }
3091
3092 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
3093
3094 static tree
3095 list2chain (tree list)
3096 {
3097 tree t;
3098
3099 for (t = list; t; t = TREE_CHAIN (t))
3100 {
3101 tree var = TREE_VALUE (t);
3102 if (TREE_CHAIN (t))
3103 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
3104 else
3105 TREE_CHAIN (var) = NULL_TREE;
3106 }
3107
3108 return list ? TREE_VALUE (list) : NULL_TREE;
3109 }
3110
3111
3112 /* Remove barriers in REGION->EXIT's block. Note that this is only
3113 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3114 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3115 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
3116 removed. */
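/* For example, in

       #pragma omp parallel
       {
         #pragma omp for
         for (i = 0; i < n; i++)
           body (i);
         // implicit barrier of the for, immediately followed by
       } // the implicit barrier of the parallel itself

   the inner barrier is redundant and its GIMPLE_OMP_RETURN is marked
   nowait, unless addressable locals of the parallel might still be
   needed by queued tasks (see below).  */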
3117
3118 static void
3119 remove_exit_barrier (struct omp_region *region)
3120 {
3121 gimple_stmt_iterator gsi;
3122 basic_block exit_bb;
3123 edge_iterator ei;
3124 edge e;
3125 gimple stmt;
3126 int any_addressable_vars = -1;
3127
3128 exit_bb = region->exit;
3129
3130 /* If the parallel region doesn't return, we don't have REGION->EXIT
3131 block at all. */
3132 if (! exit_bb)
3133 return;
3134
3135 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3136 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3137 statements that can appear in between are extremely limited -- no
3138 memory operations at all. Here we are even stricter: the only thing we
3139 allow to precede this GIMPLE_OMP_RETURN is a label. */
3140 gsi = gsi_last_bb (exit_bb);
3141 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3142 gsi_prev (&gsi);
3143 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3144 return;
3145
3146 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3147 {
3148 gsi = gsi_last_bb (e->src);
3149 if (gsi_end_p (gsi))
3150 continue;
3151 stmt = gsi_stmt (gsi);
3152 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3153 && !gimple_omp_return_nowait_p (stmt))
3154 {
3155 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3156 in many cases. If there could be tasks queued, the barrier
3157 might be needed to let the tasks run before some local
3158 variable of the parallel that the task uses as shared
3159 runs out of scope. The task can be spawned either
3160 from within current function (this would be easy to check)
3161 or from some function it calls and gets passed an address
3162 of such a variable. */
3163 if (any_addressable_vars < 0)
3164 {
3165 gimple parallel_stmt = last_stmt (region->entry);
3166 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3167 tree local_decls = DECL_STRUCT_FUNCTION (child_fun)->local_decls;
3168 tree block;
3169
3170 any_addressable_vars = 0;
3171 for (; local_decls; local_decls = TREE_CHAIN (local_decls))
3172 if (TREE_ADDRESSABLE (TREE_VALUE (local_decls)))
3173 {
3174 any_addressable_vars = 1;
3175 break;
3176 }
3177 for (block = gimple_block (stmt);
3178 !any_addressable_vars
3179 && block
3180 && TREE_CODE (block) == BLOCK;
3181 block = BLOCK_SUPERCONTEXT (block))
3182 {
3183 for (local_decls = BLOCK_VARS (block);
3184 local_decls;
3185 local_decls = TREE_CHAIN (local_decls))
3186 if (TREE_ADDRESSABLE (local_decls))
3187 {
3188 any_addressable_vars = 1;
3189 break;
3190 }
3191 if (block == gimple_block (parallel_stmt))
3192 break;
3193 }
3194 }
3195 if (!any_addressable_vars)
3196 gimple_omp_return_set_nowait (stmt);
3197 }
3198 }
3199 }
3200
3201 static void
3202 remove_exit_barriers (struct omp_region *region)
3203 {
3204 if (region->type == GIMPLE_OMP_PARALLEL)
3205 remove_exit_barrier (region);
3206
3207 if (region->inner)
3208 {
3209 region = region->inner;
3210 remove_exit_barriers (region);
3211 while (region->next)
3212 {
3213 region = region->next;
3214 remove_exit_barriers (region);
3215 }
3216 }
3217 }
3218
3219 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3220 calls. These can't be declared as const functions, but
3221 within one parallel body they are constant, so they can be
3222 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3223 which are declared const. Similarly for task body, except
3224 that in untied task omp_get_thread_num () can change at any task
3225 scheduling point. */
3226
3227 static void
3228 optimize_omp_library_calls (gimple entry_stmt)
3229 {
3230 basic_block bb;
3231 gimple_stmt_iterator gsi;
3232 tree thr_num_id
3233 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3234 tree num_thr_id
3235 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3236 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3237 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3238 OMP_CLAUSE_UNTIED) != NULL);
3239
3240 FOR_EACH_BB (bb)
3241 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3242 {
3243 gimple call = gsi_stmt (gsi);
3244 tree decl;
3245
3246 if (is_gimple_call (call)
3247 && (decl = gimple_call_fndecl (call))
3248 && DECL_EXTERNAL (decl)
3249 && TREE_PUBLIC (decl)
3250 && DECL_INITIAL (decl) == NULL)
3251 {
3252 tree built_in;
3253
3254 if (DECL_NAME (decl) == thr_num_id)
3255 {
3256 /* In #pragma omp task untied omp_get_thread_num () can change
3257 during the execution of the task region. */
3258 if (untied_task)
3259 continue;
3260 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3261 }
3262 else if (DECL_NAME (decl) == num_thr_id)
3263 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3264 else
3265 continue;
3266
3267 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3268 || gimple_call_num_args (call) != 0)
3269 continue;
3270
3271 if (flag_exceptions && !TREE_NOTHROW (decl))
3272 continue;
3273
3274 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3275 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl)))
3276 != TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (built_in))))
3277 continue;
3278
3279 gimple_call_set_fndecl (call, built_in);
3280 }
3281 }
3282 }
3283
3284 /* Expand the OpenMP parallel or task directive starting at REGION. */
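/* In outline: the statements between REGION->ENTRY and REGION->EXIT are
   moved into the child function with move_sese_region_to_fn, the
   terminating GIMPLE_OMP_RETURN is replaced by a plain return, and what
   remains in the current function is just the launch sequence produced
   by expand_parallel_call or expand_task_call.  */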
3285
3286 static void
3287 expand_omp_taskreg (struct omp_region *region)
3288 {
3289 basic_block entry_bb, exit_bb, new_bb;
3290 struct function *child_cfun;
3291 tree child_fn, block, t, ws_args, *tp;
3292 tree save_current;
3293 gimple_stmt_iterator gsi;
3294 gimple entry_stmt, stmt;
3295 edge e;
3296
3297 entry_stmt = last_stmt (region->entry);
3298 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3299 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3300 /* If this function has already been instrumented, make sure
3301 the child function isn't instrumented again. */
3302 child_cfun->after_tree_profile = cfun->after_tree_profile;
3303
3304 entry_bb = region->entry;
3305 exit_bb = region->exit;
3306
3307 if (is_combined_parallel (region))
3308 ws_args = region->ws_args;
3309 else
3310 ws_args = NULL_TREE;
3311
3312 if (child_cfun->cfg)
3313 {
3314 /* Due to inlining, it may happen that we have already outlined
3315 the region, in which case all we need to do is make the
3316 sub-graph unreachable and emit the parallel call. */
3317 edge entry_succ_e, exit_succ_e;
3318 gimple_stmt_iterator gsi;
3319
3320 entry_succ_e = single_succ_edge (entry_bb);
3321
3322 gsi = gsi_last_bb (entry_bb);
3323 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3324 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3325 gsi_remove (&gsi, true);
3326
3327 new_bb = entry_bb;
3328 if (exit_bb)
3329 {
3330 exit_succ_e = single_succ_edge (exit_bb);
3331 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3332 }
3333 remove_edge_and_dominated_blocks (entry_succ_e);
3334 }
3335 else
3336 {
3337 /* If the parallel region needs data sent from the parent
3338 function, then the very first statement (except possible
3339 tree profile counter updates) of the parallel body
3340 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3341 &.OMP_DATA_O is passed as an argument to the child function,
3342 we need to replace it with the argument as seen by the child
3343 function.
3344
3345 In most cases, this will end up being the identity assignment
3346 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3347 a function call that has been inlined, the original PARM_DECL
3348 .OMP_DATA_I may have been converted into a different local
3349 variable, in which case we need to keep the assignment. */
3350 if (gimple_omp_taskreg_data_arg (entry_stmt))
3351 {
3352 basic_block entry_succ_bb = single_succ (entry_bb);
3353 gimple_stmt_iterator gsi;
3354 tree arg, narg;
3355 gimple parcopy_stmt = NULL;
3356
3357 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3358 {
3359 gimple stmt;
3360
3361 gcc_assert (!gsi_end_p (gsi));
3362 stmt = gsi_stmt (gsi);
3363 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3364 continue;
3365
3366 if (gimple_num_ops (stmt) == 2)
3367 {
3368 tree arg = gimple_assign_rhs1 (stmt);
3369
3370 /* We're ignoring the subcode because we're
3371 effectively doing a STRIP_NOPS. */
3372
3373 if (TREE_CODE (arg) == ADDR_EXPR
3374 && TREE_OPERAND (arg, 0)
3375 == gimple_omp_taskreg_data_arg (entry_stmt))
3376 {
3377 parcopy_stmt = stmt;
3378 break;
3379 }
3380 }
3381 }
3382
3383 gcc_assert (parcopy_stmt != NULL);
3384 arg = DECL_ARGUMENTS (child_fn);
3385
3386 if (!gimple_in_ssa_p (cfun))
3387 {
3388 if (gimple_assign_lhs (parcopy_stmt) == arg)
3389 gsi_remove (&gsi, true);
3390 else
3391 {
3392 /* ?? Is setting the subcode really necessary ?? */
3393 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3394 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3395 }
3396 }
3397 else
3398 {
3399 /* If we are in ssa form, we must load the value from the default
3400 definition of the argument. That should not be defined now,
3401 since the argument is not used uninitialized. */
3402 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3403 narg = make_ssa_name (arg, gimple_build_nop ());
3404 set_default_def (arg, narg);
3405 /* ?? Is setting the subcode really necessary ?? */
3406 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3407 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3408 update_stmt (parcopy_stmt);
3409 }
3410 }
3411
3412 /* Declare local variables needed in CHILD_CFUN. */
3413 block = DECL_INITIAL (child_fn);
3414 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
3415 /* The gimplifier could record temporaries in parallel/task block
3416 rather than in containing function's local_decls chain,
3417 which would mean cgraph missed finalizing them. Do it now. */
3418 for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
3419 if (TREE_CODE (t) == VAR_DECL
3420 && TREE_STATIC (t)
3421 && !DECL_EXTERNAL (t))
3422 varpool_finalize_decl (t);
3423 DECL_SAVED_TREE (child_fn) = NULL;
3424 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3425 TREE_USED (block) = 1;
3426
3427 /* Reset DECL_CONTEXT on function arguments. */
3428 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3429 DECL_CONTEXT (t) = child_fn;
3430
3431 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3432 so that it can be moved to the child function. */
3433 gsi = gsi_last_bb (entry_bb);
3434 stmt = gsi_stmt (gsi);
3435 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3436 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3437 gsi_remove (&gsi, true);
3438 e = split_block (entry_bb, stmt);
3439 entry_bb = e->dest;
3440 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3441
3442 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3443 if (exit_bb)
3444 {
3445 gsi = gsi_last_bb (exit_bb);
3446 gcc_assert (!gsi_end_p (gsi)
3447 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3448 stmt = gimple_build_return (NULL);
3449 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3450 gsi_remove (&gsi, true);
3451 }
3452
3453 /* Move the parallel region into CHILD_CFUN. */
3454
3455 if (gimple_in_ssa_p (cfun))
3456 {
3457 push_cfun (child_cfun);
3458 init_tree_ssa (child_cfun);
3459 init_ssa_operands ();
3460 cfun->gimple_df->in_ssa_p = true;
3461 pop_cfun ();
3462 block = NULL_TREE;
3463 }
3464 else
3465 block = gimple_block (entry_stmt);
3466
3467 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3468 if (exit_bb)
3469 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3470
3471 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3472 for (tp = &child_cfun->local_decls; *tp; )
3473 if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
3474 tp = &TREE_CHAIN (*tp);
3475 else
3476 *tp = TREE_CHAIN (*tp);
3477
3478 /* Inform the callgraph about the new function. */
3479 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3480 = cfun->curr_properties;
3481 cgraph_add_new_function (child_fn, true);
3482
3483 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3484 fixed in a following pass. */
3485 push_cfun (child_cfun);
3486 save_current = current_function_decl;
3487 current_function_decl = child_fn;
3488 if (optimize)
3489 optimize_omp_library_calls (entry_stmt);
3490 rebuild_cgraph_edges ();
3491
3492 /* Some EH regions might become dead, see PR34608. If
3493 pass_cleanup_cfg isn't the first pass to happen with the
3494 new child, these dead EH edges might cause problems.
3495 Clean them up now. */
3496 if (flag_exceptions)
3497 {
3498 basic_block bb;
3499 bool changed = false;
3500
3501 FOR_EACH_BB (bb)
3502 changed |= gimple_purge_dead_eh_edges (bb);
3503 if (changed)
3504 cleanup_tree_cfg ();
3505 }
3506 if (gimple_in_ssa_p (cfun))
3507 update_ssa (TODO_update_ssa);
3508 current_function_decl = save_current;
3509 pop_cfun ();
3510 }
3511
3512 /* Emit a library call to launch the children threads. */
3513 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3514 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3515 else
3516 expand_task_call (new_bb, entry_stmt);
3517 update_ssa (TODO_update_ssa_only_virtuals);
3518 }
3519
3520
3521 /* A subroutine of expand_omp_for. Generate code for a parallel
3522 loop with any schedule. Given parameters:
3523
3524 for (V = N1; V cond N2; V += STEP) BODY;
3525
3526 where COND is "<" or ">", we generate pseudocode
3527
3528 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3529 if (more) goto L0; else goto L3;
3530 L0:
3531 V = istart0;
3532 iend = iend0;
3533 L1:
3534 BODY;
3535 V += STEP;
3536 if (V cond iend) goto L1; else goto L2;
3537 L2:
3538 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3539 L3:
3540
3541 If this is a combined omp parallel loop, instead of the call to
3542 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3543
3544 For collapsed loops, given parameters:
3545 collapse(3)
3546 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3547 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3548 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3549 BODY;
3550
3551 we generate pseudocode
3552
3553 if (cond3 is <)
3554 adj = STEP3 - 1;
3555 else
3556 adj = STEP3 + 1;
3557 count3 = (adj + N32 - N31) / STEP3;
3558 if (cond2 is <)
3559 adj = STEP2 - 1;
3560 else
3561 adj = STEP2 + 1;
3562 count2 = (adj + N22 - N21) / STEP2;
3563 if (cond1 is <)
3564 adj = STEP1 - 1;
3565 else
3566 adj = STEP1 + 1;
3567 count1 = (adj + N12 - N11) / STEP1;
3568 count = count1 * count2 * count3;
3569 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3570 if (more) goto L0; else goto L3;
3571 L0:
3572 V = istart0;
3573 T = V;
3574 V3 = N31 + (T % count3) * STEP3;
3575 T = T / count3;
3576 V2 = N21 + (T % count2) * STEP2;
3577 T = T / count2;
3578 V1 = N11 + T * STEP1;
3579 iend = iend0;
3580 L1:
3581 BODY;
3582 V += 1;
3583 if (V < iend) goto L10; else goto L2;
3584 L10:
3585 V3 += STEP3;
3586 if (V3 cond3 N32) goto L1; else goto L11;
3587 L11:
3588 V3 = N31;
3589 V2 += STEP2;
3590 if (V2 cond2 N22) goto L1; else goto L12;
3591 L12:
3592 V2 = N21;
3593 V1 += STEP1;
3594 goto L1;
3595 L2:
3596 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3597 L3:
3598
3599 */
3600
3601 static void
3602 expand_omp_for_generic (struct omp_region *region,
3603 struct omp_for_data *fd,
3604 enum built_in_function start_fn,
3605 enum built_in_function next_fn)
3606 {
3607 tree type, istart0, iend0, iend;
3608 tree t, vmain, vback, bias = NULL_TREE;
3609 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3610 basic_block l2_bb = NULL, l3_bb = NULL;
3611 gimple_stmt_iterator gsi;
3612 gimple stmt;
3613 bool in_combined_parallel = is_combined_parallel (region);
3614 bool broken_loop = region->cont == NULL;
3615 edge e, ne;
3616 tree *counts = NULL;
3617 int i;
3618
3619 gcc_assert (!broken_loop || !in_combined_parallel);
3620 gcc_assert (fd->iter_type == long_integer_type_node
3621 || !in_combined_parallel);
3622
3623 type = TREE_TYPE (fd->loop.v);
3624 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3625 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3626 TREE_ADDRESSABLE (istart0) = 1;
3627 TREE_ADDRESSABLE (iend0) = 1;
3628 if (gimple_in_ssa_p (cfun))
3629 {
3630 add_referenced_var (istart0);
3631 add_referenced_var (iend0);
3632 }
3633
3634 /* See if we need to bias by LLONG_MIN. */
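/* The runtime's unsigned long long iterator interface cannot represent
   a signed range directly, so when the bounds may straddle zero the
   signed minimum (reinterpreted as unsigned) is added to both ends
   here and subtracted again below when istart0/iend0 are converted
   back to the loop's type.  */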
3635 if (fd->iter_type == long_long_unsigned_type_node
3636 && TREE_CODE (type) == INTEGER_TYPE
3637 && !TYPE_UNSIGNED (type))
3638 {
3639 tree n1, n2;
3640
3641 if (fd->loop.cond_code == LT_EXPR)
3642 {
3643 n1 = fd->loop.n1;
3644 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3645 }
3646 else
3647 {
3648 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3649 n2 = fd->loop.n1;
3650 }
3651 if (TREE_CODE (n1) != INTEGER_CST
3652 || TREE_CODE (n2) != INTEGER_CST
3653 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3654 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3655 }
3656
3657 entry_bb = region->entry;
3658 cont_bb = region->cont;
3659 collapse_bb = NULL;
3660 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3661 gcc_assert (broken_loop
3662 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3663 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3664 l1_bb = single_succ (l0_bb);
3665 if (!broken_loop)
3666 {
3667 l2_bb = create_empty_bb (cont_bb);
3668 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3669 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3670 }
3671 else
3672 l2_bb = NULL;
3673 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3674 exit_bb = region->exit;
3675
3676 gsi = gsi_last_bb (entry_bb);
3677
3678 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3679 if (fd->collapse > 1)
3680 {
3681 /* collapsed loops need work for expansion in SSA form. */
3682 gcc_assert (!gimple_in_ssa_p (cfun));
3683 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3684 for (i = 0; i < fd->collapse; i++)
3685 {
3686 tree itype = TREE_TYPE (fd->loops[i].v);
3687
3688 if (POINTER_TYPE_P (itype))
3689 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3690 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3691 ? -1 : 1));
3692 t = fold_build2 (PLUS_EXPR, itype,
3693 fold_convert (itype, fd->loops[i].step), t);
3694 t = fold_build2 (PLUS_EXPR, itype, t,
3695 fold_convert (itype, fd->loops[i].n2));
3696 t = fold_build2 (MINUS_EXPR, itype, t,
3697 fold_convert (itype, fd->loops[i].n1));
3698 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3699 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3700 fold_build1 (NEGATE_EXPR, itype, t),
3701 fold_build1 (NEGATE_EXPR, itype,
3702 fold_convert (itype,
3703 fd->loops[i].step)));
3704 else
3705 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3706 fold_convert (itype, fd->loops[i].step));
3707 t = fold_convert (type, t);
3708 if (TREE_CODE (t) == INTEGER_CST)
3709 counts[i] = t;
3710 else
3711 {
3712 counts[i] = create_tmp_var (type, ".count");
3713 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3714 true, GSI_SAME_STMT);
3715 stmt = gimple_build_assign (counts[i], t);
3716 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3717 }
3718 if (SSA_VAR_P (fd->loop.n2))
3719 {
3720 if (i == 0)
3721 t = counts[0];
3722 else
3723 {
3724 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3725 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3726 true, GSI_SAME_STMT);
3727 }
3728 stmt = gimple_build_assign (fd->loop.n2, t);
3729 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3730 }
3731 }
3732 }
3733 if (in_combined_parallel)
3734 {
3735 /* In a combined parallel loop, emit a call to
3736 GOMP_loop_foo_next. */
3737 t = build_call_expr (built_in_decls[next_fn], 2,
3738 build_fold_addr_expr (istart0),
3739 build_fold_addr_expr (iend0));
3740 }
3741 else
3742 {
3743 tree t0, t1, t2, t3, t4;
3744 /* If this is not a combined parallel loop, emit a call to
3745 GOMP_loop_foo_start in ENTRY_BB. */
3746 t4 = build_fold_addr_expr (iend0);
3747 t3 = build_fold_addr_expr (istart0);
3748 t2 = fold_convert (fd->iter_type, fd->loop.step);
3749 if (POINTER_TYPE_P (type)
3750 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3751 {
3752 /* Avoid casting pointers to integer of a different size. */
3753 tree itype
3754 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3755 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3756 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3757 }
3758 else
3759 {
3760 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3761 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3762 }
3763 if (bias)
3764 {
3765 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3766 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3767 }
3768 if (fd->iter_type == long_integer_type_node)
3769 {
3770 if (fd->chunk_size)
3771 {
3772 t = fold_convert (fd->iter_type, fd->chunk_size);
3773 t = build_call_expr (built_in_decls[start_fn], 6,
3774 t0, t1, t2, t, t3, t4);
3775 }
3776 else
3777 t = build_call_expr (built_in_decls[start_fn], 5,
3778 t0, t1, t2, t3, t4);
3779 }
3780 else
3781 {
3782 tree t5;
3783 tree c_bool_type;
3784
3785 /* The GOMP_loop_ull_*start functions have an additional boolean
3786 argument, true for < loops and false for > loops.
3787 In Fortran, the C bool type can be different from
3788 boolean_type_node. */
3789 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3790 t5 = build_int_cst (c_bool_type,
3791 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3792 if (fd->chunk_size)
3793 {
3794 t = fold_convert (fd->iter_type, fd->chunk_size);
3795 t = build_call_expr (built_in_decls[start_fn], 7,
3796 t5, t0, t1, t2, t, t3, t4);
3797 }
3798 else
3799 t = build_call_expr (built_in_decls[start_fn], 6,
3800 t5, t0, t1, t2, t3, t4);
3801 }
3802 }
3803 if (TREE_TYPE (t) != boolean_type_node)
3804 t = fold_build2 (NE_EXPR, boolean_type_node,
3805 t, build_int_cst (TREE_TYPE (t), 0));
3806 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3807 true, GSI_SAME_STMT);
3808 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3809
3810 /* Remove the GIMPLE_OMP_FOR statement. */
3811 gsi_remove (&gsi, true);
3812
3813 /* Iteration setup for sequential loop goes in L0_BB. */
3814 gsi = gsi_start_bb (l0_bb);
3815 if (bias)
3816 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3817 istart0, bias));
3818 else
3819 t = fold_convert (type, istart0);
3820 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3821 false, GSI_CONTINUE_LINKING);
3822 stmt = gimple_build_assign (fd->loop.v, t);
3823 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3824
3825 if (bias)
3826 t = fold_convert (type, fold_build2 (MINUS_EXPR, fd->iter_type,
3827 iend0, bias));
3828 else
3829 t = fold_convert (type, iend0);
3830 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3831 false, GSI_CONTINUE_LINKING);
3832 if (fd->collapse > 1)
3833 {
3834 tree tem = create_tmp_var (type, ".tem");
3835
3836 stmt = gimple_build_assign (tem, fd->loop.v);
3837 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3838 for (i = fd->collapse - 1; i >= 0; i--)
3839 {
3840 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3841 itype = vtype;
3842 if (POINTER_TYPE_P (vtype))
3843 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3844 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3845 t = fold_convert (itype, t);
3846 t = fold_build2 (MULT_EXPR, itype, t, fd->loops[i].step);
3847 if (POINTER_TYPE_P (vtype))
3848 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3849 fd->loops[i].n1, fold_convert (sizetype, t));
3850 else
3851 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3852 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3853 false, GSI_CONTINUE_LINKING);
3854 stmt = gimple_build_assign (fd->loops[i].v, t);
3855 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3856 if (i != 0)
3857 {
3858 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3859 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3860 false, GSI_CONTINUE_LINKING);
3861 stmt = gimple_build_assign (tem, t);
3862 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3863 }
3864 }
3865 }
3866
3867 if (!broken_loop)
3868 {
3869 /* Code to control the increment and predicate for the sequential
3870 loop goes in the CONT_BB. */
3871 gsi = gsi_last_bb (cont_bb);
3872 stmt = gsi_stmt (gsi);
3873 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3874 vmain = gimple_omp_continue_control_use (stmt);
3875 vback = gimple_omp_continue_control_def (stmt);
3876
3877 if (POINTER_TYPE_P (type))
3878 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3879 fold_convert (sizetype, fd->loop.step));
3880 else
3881 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3882 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3883 true, GSI_SAME_STMT);
3884 stmt = gimple_build_assign (vback, t);
3885 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3886
3887 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3888 stmt = gimple_build_cond_empty (t);
3889 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3890
3891 /* Remove GIMPLE_OMP_CONTINUE. */
3892 gsi_remove (&gsi, true);
3893
3894 if (fd->collapse > 1)
3895 {
3896 basic_block last_bb, bb;
3897
3898 last_bb = cont_bb;
3899 for (i = fd->collapse - 1; i >= 0; i--)
3900 {
3901 tree vtype = TREE_TYPE (fd->loops[i].v);
3902
3903 bb = create_empty_bb (last_bb);
3904 gsi = gsi_start_bb (bb);
3905
3906 if (i < fd->collapse - 1)
3907 {
3908 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3909 e->probability = REG_BR_PROB_BASE / 8;
3910
3911 t = fd->loops[i + 1].n1;
3912 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3913 false, GSI_CONTINUE_LINKING);
3914 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3915 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3916 }
3917 else
3918 collapse_bb = bb;
3919
3920 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3921
3922 if (POINTER_TYPE_P (vtype))
3923 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3924 fd->loops[i].v,
3925 fold_convert (sizetype, fd->loops[i].step));
3926 else
3927 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3928 fd->loops[i].step);
3929 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3930 false, GSI_CONTINUE_LINKING);
3931 stmt = gimple_build_assign (fd->loops[i].v, t);
3932 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3933
3934 if (i > 0)
3935 {
3936 t = fd->loops[i].n2;
3937 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3938 false, GSI_CONTINUE_LINKING);
3939 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
3940 fd->loops[i].v, t);
3941 stmt = gimple_build_cond_empty (t);
3942 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3943 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
3944 e->probability = REG_BR_PROB_BASE * 7 / 8;
3945 }
3946 else
3947 make_edge (bb, l1_bb, EDGE_FALLTHRU);
3948 last_bb = bb;
3949 }
3950 }
3951
3952 /* Emit code to get the next parallel iteration in L2_BB. */
3953 gsi = gsi_start_bb (l2_bb);
3954
3955 t = build_call_expr (built_in_decls[next_fn], 2,
3956 build_fold_addr_expr (istart0),
3957 build_fold_addr_expr (iend0));
3958 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3959 false, GSI_CONTINUE_LINKING);
3960 if (TREE_TYPE (t) != boolean_type_node)
3961 t = fold_build2 (NE_EXPR, boolean_type_node,
3962 t, build_int_cst (TREE_TYPE (t), 0));
3963 stmt = gimple_build_cond_empty (t);
3964 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3965 }
3966
3967 /* Add the loop cleanup function. */
3968 gsi = gsi_last_bb (exit_bb);
3969 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
3970 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
3971 else
3972 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
3973 stmt = gimple_build_call (t, 0);
3974 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3975 gsi_remove (&gsi, true);
3976
3977 /* Connect the new blocks. */
3978 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
3979 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
3980
3981 if (!broken_loop)
3982 {
3983 gimple_seq phis;
3984
3985 e = find_edge (cont_bb, l3_bb);
3986 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
3987
3988 phis = phi_nodes (l3_bb);
3989 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
3990 {
3991 gimple phi = gsi_stmt (gsi);
3992 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
3993 PHI_ARG_DEF_FROM_EDGE (phi, e));
3994 }
3995 remove_edge (e);
3996
3997 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
3998 if (fd->collapse > 1)
3999 {
4000 e = find_edge (cont_bb, l1_bb);
4001 remove_edge (e);
4002 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4003 }
4004 else
4005 {
4006 e = find_edge (cont_bb, l1_bb);
4007 e->flags = EDGE_TRUE_VALUE;
4008 }
4009 e->probability = REG_BR_PROB_BASE * 7 / 8;
4010 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4011 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4012
4013 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4014 recompute_dominator (CDI_DOMINATORS, l2_bb));
4015 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4016 recompute_dominator (CDI_DOMINATORS, l3_bb));
4017 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4018 recompute_dominator (CDI_DOMINATORS, l0_bb));
4019 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4020 recompute_dominator (CDI_DOMINATORS, l1_bb));
4021 }
4022 }
4023
4024
4025 /* A subroutine of expand_omp_for. Generate code for a parallel
4026 loop with static schedule and no specified chunk size. Given
4027 parameters:
4028
4029 for (V = N1; V cond N2; V += STEP) BODY;
4030
4031 where COND is "<" or ">", we generate pseudocode
4032
4033 if (cond is <)
4034 adj = STEP - 1;
4035 else
4036 adj = STEP + 1;
4037 if ((__typeof (V)) -1 > 0 && cond is >)
4038 n = -(adj + N2 - N1) / -STEP;
4039 else
4040 n = (adj + N2 - N1) / STEP;
4041 q = n / nthreads;
4042 q += (q * nthreads != n);
4043 s0 = q * threadid;
4044 e0 = min(s0 + q, n);
4045 V = s0 * STEP + N1;
4046 if (s0 >= e0) goto L2; else goto L0;
4047 L0:
4048 e = e0 * STEP + N1;
4049 L1:
4050 BODY;
4051 V += STEP;
4052 if (V cond e) goto L1;
4053 L2:
4054 */
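
/* A worked example of the partitioning above: with n = 10 iterations and
   nthreads = 4, q = 10 / 4 = 2 and is bumped to 3 because 2 * 4 != 10,
   so the four threads receive the ranges [0,3), [3,6), [6,9) and [9,10).
   A thread whose s0 >= e0 (e.g. thread 3 when n = 9) never enters the
   loop and goes straight to L2.  */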
4055
4056 static void
4057 expand_omp_for_static_nochunk (struct omp_region *region,
4058 struct omp_for_data *fd)
4059 {
4060 tree n, q, s0, e0, e, t, nthreads, threadid;
4061 tree type, itype, vmain, vback;
4062 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4063 basic_block fin_bb;
4064 gimple_stmt_iterator gsi;
4065 gimple stmt;
4066
4067 itype = type = TREE_TYPE (fd->loop.v);
4068 if (POINTER_TYPE_P (type))
4069 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4070
4071 entry_bb = region->entry;
4072 cont_bb = region->cont;
4073 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4074 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4075 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4076 body_bb = single_succ (seq_start_bb);
4077 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4078 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4079 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4080 exit_bb = region->exit;
4081
4082 /* Iteration space partitioning goes in ENTRY_BB. */
4083 gsi = gsi_last_bb (entry_bb);
4084 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4085
4086 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4087 t = fold_convert (itype, t);
4088 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4089 true, GSI_SAME_STMT);
4090
4091 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4092 t = fold_convert (itype, t);
4093 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4094 true, GSI_SAME_STMT);
4095
4096 fd->loop.n1
4097 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4098 true, NULL_TREE, true, GSI_SAME_STMT);
4099 fd->loop.n2
4100 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4101 true, NULL_TREE, true, GSI_SAME_STMT);
4102 fd->loop.step
4103 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4104 true, NULL_TREE, true, GSI_SAME_STMT);
4105
4106 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4107 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4108 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4109 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4110 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4111 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4112 fold_build1 (NEGATE_EXPR, itype, t),
4113 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4114 else
4115 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4116 t = fold_convert (itype, t);
4117 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4118
4119 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4120 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4121
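/* Round the per-thread share up to ceil (n / nthreads) without a branch:
   the NE_EXPR below folds to 0 or 1 and is simply added to q.  */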
4122 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4123 t = fold_build2 (NE_EXPR, itype, t, n);
4124 t = fold_build2 (PLUS_EXPR, itype, q, t);
4125 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4126
4127 t = build2 (MULT_EXPR, itype, q, threadid);
4128 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4129
4130 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4131 t = fold_build2 (MIN_EXPR, itype, t, n);
4132 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4133
4134 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4135 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4136
4137 /* Remove the GIMPLE_OMP_FOR statement. */
4138 gsi_remove (&gsi, true);
4139
4140 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4141 gsi = gsi_start_bb (seq_start_bb);
4142
4143 t = fold_convert (itype, s0);
4144 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4145 if (POINTER_TYPE_P (type))
4146 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4147 fold_convert (sizetype, t));
4148 else
4149 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4150 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4151 false, GSI_CONTINUE_LINKING);
4152 stmt = gimple_build_assign (fd->loop.v, t);
4153 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4154
4155 t = fold_convert (itype, e0);
4156 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4157 if (POINTER_TYPE_P (type))
4158 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4159 fold_convert (sizetype, t));
4160 else
4161 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4162 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4163 false, GSI_CONTINUE_LINKING);
4164
4165 /* The code controlling the sequential loop replaces the
4166 GIMPLE_OMP_CONTINUE. */
4167 gsi = gsi_last_bb (cont_bb);
4168 stmt = gsi_stmt (gsi);
4169 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4170 vmain = gimple_omp_continue_control_use (stmt);
4171 vback = gimple_omp_continue_control_def (stmt);
4172
4173 if (POINTER_TYPE_P (type))
4174 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4175 fold_convert (sizetype, fd->loop.step));
4176 else
4177 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4178 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4179 true, GSI_SAME_STMT);
4180 stmt = gimple_build_assign (vback, t);
4181 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4182
4183 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4184 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4185
4186 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4187 gsi_remove (&gsi, true);
4188
4189 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4190 gsi = gsi_last_bb (exit_bb);
4191 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4192 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4193 false, GSI_SAME_STMT);
4194 gsi_remove (&gsi, true);
4195
4196 /* Connect all the blocks. */
4197 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4198 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4199
4200 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4201 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4202
4203 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4204 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4205 recompute_dominator (CDI_DOMINATORS, body_bb));
4206 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4207 recompute_dominator (CDI_DOMINATORS, fin_bb));
4208 }
4209
4210
4211 /* A subroutine of expand_omp_for. Generate code for a parallel
4212 loop with static schedule and a specified chunk size. Given
4213 parameters:
4214
4215 for (V = N1; V cond N2; V += STEP) BODY;
4216
4217 where COND is "<" or ">", we generate pseudocode
4218
4219 if (cond is <)
4220 adj = STEP - 1;
4221 else
4222 adj = STEP + 1;
4223 if ((__typeof (V)) -1 > 0 && cond is >)
4224 n = -(adj + N2 - N1) / -STEP;
4225 else
4226 n = (adj + N2 - N1) / STEP;
4227 trip = 0;
4228 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4229 here so that V is defined
4230 if the loop is not entered
4231 L0:
4232 s0 = (trip * nthreads + threadid) * CHUNK;
4233 e0 = min(s0 + CHUNK, n);
4234 if (s0 < n) goto L1; else goto L4;
4235 L1:
4236 V = s0 * STEP + N1;
4237 e = e0 * STEP + N1;
4238 L2:
4239 BODY;
4240 V += STEP;
4241 if (V cond e) goto L2; else goto L3;
4242 L3:
4243 trip += 1;
4244 goto L0;
4245 L4:
4246 */
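
/* A worked example of the chunked partitioning above: with CHUNK = 2,
   nthreads = 2 and n = 7, trip 0 hands thread 0 the range [0,2) and
   thread 1 the range [2,4); trip 1 hands out [4,6) and [6,7); on trip 2
   every s0 is >= n, so both threads leave through L4.  */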
4247
4248 static void
4249 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4250 {
4251 tree n, s0, e0, e, t;
4252 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4253 tree type, itype, v_main, v_back, v_extra;
4254 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4255 basic_block trip_update_bb, cont_bb, fin_bb;
4256 gimple_stmt_iterator si;
4257 gimple stmt;
4258 edge se;
4259
4260 itype = type = TREE_TYPE (fd->loop.v);
4261 if (POINTER_TYPE_P (type))
4262 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4263
4264 entry_bb = region->entry;
4265 se = split_block (entry_bb, last_stmt (entry_bb));
4266 entry_bb = se->src;
4267 iter_part_bb = se->dest;
4268 cont_bb = region->cont;
4269 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4270 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4271 == FALLTHRU_EDGE (cont_bb)->dest);
4272 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4273 body_bb = single_succ (seq_start_bb);
4274 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4275 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4276 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4277 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4278 exit_bb = region->exit;
4279
4280 /* Trip and adjustment setup goes in ENTRY_BB. */
4281 si = gsi_last_bb (entry_bb);
4282 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4283
4284 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4285 t = fold_convert (itype, t);
4286 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4287 true, GSI_SAME_STMT);
4288
4289 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4290 t = fold_convert (itype, t);
4291 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4292 true, GSI_SAME_STMT);
4293
4294 fd->loop.n1
4295 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4296 true, NULL_TREE, true, GSI_SAME_STMT);
4297 fd->loop.n2
4298 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4299 true, NULL_TREE, true, GSI_SAME_STMT);
4300 fd->loop.step
4301 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4302 true, NULL_TREE, true, GSI_SAME_STMT);
4303 fd->chunk_size
4304 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4305 true, NULL_TREE, true, GSI_SAME_STMT);
4306
4307 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4308 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4309 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4310 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4311 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4312 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4313 fold_build1 (NEGATE_EXPR, itype, t),
4314 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4315 else
4316 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4317 t = fold_convert (itype, t);
4318 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4319 true, GSI_SAME_STMT);
4320
4321 trip_var = create_tmp_var (itype, ".trip");
4322 if (gimple_in_ssa_p (cfun))
4323 {
4324 add_referenced_var (trip_var);
4325 trip_init = make_ssa_name (trip_var, NULL);
4326 trip_main = make_ssa_name (trip_var, NULL);
4327 trip_back = make_ssa_name (trip_var, NULL);
4328 }
4329 else
4330 {
4331 trip_init = trip_var;
4332 trip_main = trip_var;
4333 trip_back = trip_var;
4334 }
4335
4336 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4337 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4338
4339 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4340 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4341 if (POINTER_TYPE_P (type))
4342 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4343 fold_convert (sizetype, t));
4344 else
4345 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4346 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4347 true, GSI_SAME_STMT);
4348
4349 /* Remove the GIMPLE_OMP_FOR. */
4350 gsi_remove (&si, true);
4351
4352 /* Iteration space partitioning goes in ITER_PART_BB. */
4353 si = gsi_last_bb (iter_part_bb);
4354
4355 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4356 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4357 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4358 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4359 false, GSI_CONTINUE_LINKING);
4360
4361 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4362 t = fold_build2 (MIN_EXPR, itype, t, n);
4363 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4364 false, GSI_CONTINUE_LINKING);
4365
4366 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4367 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4368
4369 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4370 si = gsi_start_bb (seq_start_bb);
4371
4372 t = fold_convert (itype, s0);
4373 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4374 if (POINTER_TYPE_P (type))
4375 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4376 fold_convert (sizetype, t));
4377 else
4378 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4379 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4380 false, GSI_CONTINUE_LINKING);
4381 stmt = gimple_build_assign (fd->loop.v, t);
4382 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4383
4384 t = fold_convert (itype, e0);
4385 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4386 if (POINTER_TYPE_P (type))
4387 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4388 fold_convert (sizetype, t));
4389 else
4390 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4391 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4392 false, GSI_CONTINUE_LINKING);
4393
4394 /* The code controlling the sequential loop goes in CONT_BB,
4395 replacing the GIMPLE_OMP_CONTINUE. */
4396 si = gsi_last_bb (cont_bb);
4397 stmt = gsi_stmt (si);
4398 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4399 v_main = gimple_omp_continue_control_use (stmt);
4400 v_back = gimple_omp_continue_control_def (stmt);
4401
4402 if (POINTER_TYPE_P (type))
4403 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4404 fold_convert (sizetype, fd->loop.step));
4405 else
4406 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4407 stmt = gimple_build_assign (v_back, t);
4408 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4409
4410 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4411 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4412
4413 /* Remove GIMPLE_OMP_CONTINUE. */
4414 gsi_remove (&si, true);
4415
4416 /* Trip update code goes into TRIP_UPDATE_BB. */
4417 si = gsi_start_bb (trip_update_bb);
4418
4419 t = build_int_cst (itype, 1);
4420 t = build2 (PLUS_EXPR, itype, trip_main, t);
4421 stmt = gimple_build_assign (trip_back, t);
4422 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4423
4424 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4425 si = gsi_last_bb (exit_bb);
4426 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4427 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4428 false, GSI_SAME_STMT);
4429 gsi_remove (&si, true);
4430
4431 /* Connect the new blocks. */
4432 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4433 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4434
4435 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4436 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4437
4438 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4439
4440 if (gimple_in_ssa_p (cfun))
4441 {
4442 gimple_stmt_iterator psi;
4443 gimple phi;
4444 edge re, ene;
4445 edge_var_map_vector head;
4446 edge_var_map *vm;
4447 size_t i;
4448
4449 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4450 remove arguments of the phi nodes in fin_bb. We need to create
4451 appropriate phi nodes in iter_part_bb instead. */
4452 se = single_pred_edge (fin_bb);
4453 re = single_succ_edge (trip_update_bb);
4454 head = redirect_edge_var_map_vector (re);
4455 ene = single_succ_edge (entry_bb);
4456
4457 psi = gsi_start_phis (fin_bb);
4458 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4459 gsi_next (&psi), ++i)
4460 {
4461 gimple nphi;
4462
4463 phi = gsi_stmt (psi);
4464 t = gimple_phi_result (phi);
4465 gcc_assert (t == redirect_edge_var_map_result (vm));
4466 nphi = create_phi_node (t, iter_part_bb);
4467 SSA_NAME_DEF_STMT (t) = nphi;
4468
4469 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4470 /* A special case -- fd->loop.v is not yet computed in
4471 iter_part_bb; we need to use v_extra instead. */
4472 if (t == fd->loop.v)
4473 t = v_extra;
4474 add_phi_arg (nphi, t, ene);
4475 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re);
4476 }
4477 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4478 redirect_edge_var_map_clear (re);
4479 while (1)
4480 {
4481 psi = gsi_start_phis (fin_bb);
4482 if (gsi_end_p (psi))
4483 break;
4484 remove_phi_node (&psi, false);
4485 }
4486
4487 /* Make phi node for trip. */
4488 phi = create_phi_node (trip_main, iter_part_bb);
4489 SSA_NAME_DEF_STMT (trip_main) = phi;
4490 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb));
4491 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb));
4492 }
4493
4494 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4495 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4496 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4497 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4498 recompute_dominator (CDI_DOMINATORS, fin_bb));
4499 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4500 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4501 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4502 recompute_dominator (CDI_DOMINATORS, body_bb));
4503 }
4504
4505
4506 /* Expand the OpenMP loop defined by REGION. */
4507
4508 static void
4509 expand_omp_for (struct omp_region *region)
4510 {
4511 struct omp_for_data fd;
4512 struct omp_for_data_loop *loops;
4513
4514 loops
4515 = (struct omp_for_data_loop *)
4516 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4517 * sizeof (struct omp_for_data_loop));
4518 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4519 region->sched_kind = fd.sched_kind;
4520
4521 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4522 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4523 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4524 if (region->cont)
4525 {
4526 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4527 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4528 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4529 }
4530
4531 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4532 && !fd.have_ordered
4533 && fd.collapse == 1
4534 && region->cont != NULL)
4535 {
4536 if (fd.chunk_size == NULL)
4537 expand_omp_for_static_nochunk (region, &fd);
4538 else
4539 expand_omp_for_static_chunk (region, &fd);
4540 }
4541 else
4542 {
4543 int fn_index, start_ix, next_ix;
4544
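/* Pick the libgomp entry points.  The index arithmetic below relies on
   the GOMP_loop_*_start/next builtins being declared in schedule order
   (static, dynamic, guided, runtime), followed by the four ordered
   variants; e.g. schedule(dynamic) without an ordered clause ends up
   calling GOMP_loop_dynamic_start and GOMP_loop_dynamic_next.  */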
4545 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4546 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4547 ? 3 : fd.sched_kind;
4548 fn_index += fd.have_ordered * 4;
4549 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4550 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4551 if (fd.iter_type == long_long_unsigned_type_node)
4552 {
4553 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4554 - BUILT_IN_GOMP_LOOP_STATIC_START;
4555 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4556 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4557 }
4558 expand_omp_for_generic (region, &fd, start_ix, next_ix);
4559 }
4560
4561 update_ssa (TODO_update_ssa_only_virtuals);
4562 }
4563
4564
4565 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4566
4567 v = GOMP_sections_start (n);
4568 L0:
4569 switch (v)
4570 {
4571 case 0:
4572 goto L2;
4573 case 1:
4574 section 1;
4575 goto L1;
4576 case 2:
4577 ...
4578 case n:
4579 ...
4580 default:
4581 abort ();
4582 }
4583 L1:
4584 v = GOMP_sections_next ();
4585 goto L0;
4586 L2:
4587 reduction;
4588
4589 If this is a combined parallel sections, replace the call to
4590 GOMP_sections_start with a call to GOMP_sections_next. */
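
/* A sketch of the lowering for a two-section construct (an assumed
   example, following the pseudocode above):

     #pragma omp sections
     {
       #pragma omp section
         foo ();
       #pragma omp section
         bar ();
     }

   GOMP_sections_start (2) returns the first section to execute, the
   switch runs case 1 (foo) or case 2 (bar), case 0 exits the construct,
   and GOMP_sections_next () keeps returning section numbers until it
   returns 0.  */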
4591
4592 static void
4593 expand_omp_sections (struct omp_region *region)
4594 {
4595 tree t, u, vin = NULL, vmain, vnext, l1, l2;
4596 VEC (tree,heap) *label_vec;
4597 unsigned len;
4598 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4599 gimple_stmt_iterator si, switch_si;
4600 gimple sections_stmt, stmt, cont;
4601 edge_iterator ei;
4602 edge e;
4603 struct omp_region *inner;
4604 unsigned i, casei;
4605 bool exit_reachable = region->cont != NULL;
4606
4607 gcc_assert (exit_reachable == (region->exit != NULL));
4608 entry_bb = region->entry;
4609 l0_bb = single_succ (entry_bb);
4610 l1_bb = region->cont;
4611 l2_bb = region->exit;
4612 if (exit_reachable)
4613 {
4614 if (single_pred (l2_bb) == l0_bb)
4615 l2 = gimple_block_label (l2_bb);
4616 else
4617 {
4618 /* This can happen if there are reductions. */
4619 len = EDGE_COUNT (l0_bb->succs);
4620 gcc_assert (len > 0);
4621 e = EDGE_SUCC (l0_bb, len - 1);
4622 si = gsi_last_bb (e->dest);
4623 l2 = NULL_TREE;
4624 if (gsi_end_p (si)
4625 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4626 l2 = gimple_block_label (e->dest);
4627 else
4628 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4629 {
4630 si = gsi_last_bb (e->dest);
4631 if (gsi_end_p (si)
4632 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4633 {
4634 l2 = gimple_block_label (e->dest);
4635 break;
4636 }
4637 }
4638 }
4639 default_bb = create_empty_bb (l1_bb->prev_bb);
4640 l1 = gimple_block_label (l1_bb);
4641 }
4642 else
4643 {
4644 default_bb = create_empty_bb (l0_bb);
4645 l1 = NULL_TREE;
4646 l2 = gimple_block_label (default_bb);
4647 }
4648
4649 /* We will build a switch() with enough cases for all the
4650 GIMPLE_OMP_SECTION regions, a '0' case to signal that no more work
4651 remains, and a default case to abort if something goes wrong. */
4652 len = EDGE_COUNT (l0_bb->succs);
4653
4654 /* Use VEC_quick_push on label_vec throughout, since we know the size
4655 in advance. */
4656 label_vec = VEC_alloc (tree, heap, len);
4657
4658 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4659 GIMPLE_OMP_SECTIONS statement. */
4660 si = gsi_last_bb (entry_bb);
4661 sections_stmt = gsi_stmt (si);
4662 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4663 vin = gimple_omp_sections_control (sections_stmt);
4664 if (!is_combined_parallel (region))
4665 {
4666 /* If we are not inside a combined parallel+sections region,
4667 call GOMP_sections_start. */
4668 t = build_int_cst (unsigned_type_node,
4669 exit_reachable ? len - 1 : len);
4670 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4671 stmt = gimple_build_call (u, 1, t);
4672 }
4673 else
4674 {
4675 /* Otherwise, call GOMP_sections_next. */
4676 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4677 stmt = gimple_build_call (u, 0);
4678 }
4679 gimple_call_set_lhs (stmt, vin);
4680 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4681 gsi_remove (&si, true);
4682
4683 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4684 L0_BB. */
4685 switch_si = gsi_last_bb (l0_bb);
4686 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4687 if (exit_reachable)
4688 {
4689 cont = last_stmt (l1_bb);
4690 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4691 vmain = gimple_omp_continue_control_use (cont);
4692 vnext = gimple_omp_continue_control_def (cont);
4693 }
4694 else
4695 {
4696 vmain = vin;
4697 vnext = NULL_TREE;
4698 }
4699
4700 i = 0;
4701 if (exit_reachable)
4702 {
4703 t = build3 (CASE_LABEL_EXPR, void_type_node,
4704 build_int_cst (unsigned_type_node, 0), NULL, l2);
4705 VEC_quick_push (tree, label_vec, t);
4706 i++;
4707 }
4708
4709 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4710 for (inner = region->inner, casei = 1;
4711 inner;
4712 inner = inner->next, i++, casei++)
4713 {
4714 basic_block s_entry_bb, s_exit_bb;
4715
4716 /* Skip optional reduction region. */
4717 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4718 {
4719 --i;
4720 --casei;
4721 continue;
4722 }
4723
4724 s_entry_bb = inner->entry;
4725 s_exit_bb = inner->exit;
4726
4727 t = gimple_block_label (s_entry_bb);
4728 u = build_int_cst (unsigned_type_node, casei);
4729 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4730 VEC_quick_push (tree, label_vec, u);
4731
4732 si = gsi_last_bb (s_entry_bb);
4733 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4734 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4735 gsi_remove (&si, true);
4736 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4737
4738 if (s_exit_bb == NULL)
4739 continue;
4740
4741 si = gsi_last_bb (s_exit_bb);
4742 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4743 gsi_remove (&si, true);
4744
4745 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4746 }
4747
4748 /* Error handling code goes in DEFAULT_BB. */
4749 t = gimple_block_label (default_bb);
4750 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4751 make_edge (l0_bb, default_bb, 0);
4752
4753 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4754 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4755 gsi_remove (&switch_si, true);
4756 VEC_free (tree, heap, label_vec);
4757
4758 si = gsi_start_bb (default_bb);
4759 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4760 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4761
4762 if (exit_reachable)
4763 {
4764 /* Code to get the next section goes in L1_BB. */
4765 si = gsi_last_bb (l1_bb);
4766 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4767
4768 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4769 gimple_call_set_lhs (stmt, vnext);
4770 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4771 gsi_remove (&si, true);
4772
4773 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4774
4775 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4776 si = gsi_last_bb (l2_bb);
4777 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4778 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4779 else
4780 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4781 stmt = gimple_build_call (t, 0);
4782 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4783 gsi_remove (&si, true);
4784 }
4785
4786 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4787 }
4788
4789
4790 /* Expand code for an OpenMP single directive. We've already expanded
4791 much of the code; here we simply place the GOMP_barrier call. */
4792
4793 static void
4794 expand_omp_single (struct omp_region *region)
4795 {
4796 basic_block entry_bb, exit_bb;
4797 gimple_stmt_iterator si;
4798 bool need_barrier = false;
4799
4800 entry_bb = region->entry;
4801 exit_bb = region->exit;
4802
4803 si = gsi_last_bb (entry_bb);
4804 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4805 be removed. We need to ensure that the thread that entered the single
4806 does not exit before the data is copied out by the other threads. */
4807 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4808 OMP_CLAUSE_COPYPRIVATE))
4809 need_barrier = true;
4810 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4811 gsi_remove (&si, true);
4812 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4813
4814 si = gsi_last_bb (exit_bb);
4815 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4816 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4817 false, GSI_SAME_STMT);
4818 gsi_remove (&si, true);
4819 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4820 }
4821
4822
4823 /* Generic expansion for OpenMP synchronization directives: master,
4824 ordered and critical. All we need to do here is remove the entry
4825 and exit markers for REGION. */
4826
4827 static void
4828 expand_omp_synch (struct omp_region *region)
4829 {
4830 basic_block entry_bb, exit_bb;
4831 gimple_stmt_iterator si;
4832
4833 entry_bb = region->entry;
4834 exit_bb = region->exit;
4835
4836 si = gsi_last_bb (entry_bb);
4837 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4838 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4839 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4840 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4841 gsi_remove (&si, true);
4842 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4843
4844 if (exit_bb)
4845 {
4846 si = gsi_last_bb (exit_bb);
4847 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4848 gsi_remove (&si, true);
4849 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4850 }
4851 }
4852
4853 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4854 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4855 size of the data type, and thus usable to find the index of the builtin
4856 decl. Returns false if the expression is not of the proper form. */
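
/* An assumed example of the form this routine recognizes:

     #pragma omp atomic
       x += 1;

   on a 4-byte int x produces a load_bb/store_bb pair that is collapsed
   below into a single call equivalent to __sync_fetch_and_add (&x, 1);
   when the target has no matching sync pattern we return false and the
   caller falls back to the compare-and-swap loop or the mutex form.  */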
4857
4858 static bool
4859 expand_omp_atomic_fetch_op (basic_block load_bb,
4860 tree addr, tree loaded_val,
4861 tree stored_val, int index)
4862 {
4863 enum built_in_function base;
4864 tree decl, itype, call;
4865 enum insn_code *optab;
4866 tree rhs;
4867 basic_block store_bb = single_succ (load_bb);
4868 gimple_stmt_iterator gsi;
4869 gimple stmt;
4870
4871 /* We expect to find the following sequences:
4872
4873 load_bb:
4874 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4875
4876 store_bb:
4877 val = tmp OP something; (or: something OP tmp)
4878 GIMPLE_OMP_STORE (val)
4879
4880 ???FIXME: Allow a more flexible sequence.
4881 Perhaps use data flow to pick the statements.
4882
4883 */
4884
4885 gsi = gsi_after_labels (store_bb);
4886 stmt = gsi_stmt (gsi);
4887 if (!is_gimple_assign (stmt))
4888 return false;
4889 gsi_next (&gsi);
4890 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
4891 return false;
4892
4893 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
4894 return false;
4895
4896 /* Check for one of the supported fetch-op operations. */
4897 switch (gimple_assign_rhs_code (stmt))
4898 {
4899 case PLUS_EXPR:
4900 case POINTER_PLUS_EXPR:
4901 base = BUILT_IN_FETCH_AND_ADD_N;
4902 optab = sync_add_optab;
4903 break;
4904 case MINUS_EXPR:
4905 base = BUILT_IN_FETCH_AND_SUB_N;
4906 optab = sync_add_optab;
4907 break;
4908 case BIT_AND_EXPR:
4909 base = BUILT_IN_FETCH_AND_AND_N;
4910 optab = sync_and_optab;
4911 break;
4912 case BIT_IOR_EXPR:
4913 base = BUILT_IN_FETCH_AND_OR_N;
4914 optab = sync_ior_optab;
4915 break;
4916 case BIT_XOR_EXPR:
4917 base = BUILT_IN_FETCH_AND_XOR_N;
4918 optab = sync_xor_optab;
4919 break;
4920 default:
4921 return false;
4922 }
4923 /* Make sure the expression is of the proper form. */
4924 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4925 rhs = gimple_assign_rhs2 (stmt);
4926 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4927 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
4928 rhs = gimple_assign_rhs1 (stmt);
4929 else
4930 return false;
4931
4932 decl = built_in_decls[base + index + 1];
4933 itype = TREE_TYPE (TREE_TYPE (decl));
4934
4935 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
4936 return false;
4937
4938 gsi = gsi_last_bb (load_bb);
4939 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
4940 call = build_call_expr (decl, 2, addr, fold_convert (itype, rhs));
4941 call = fold_convert (void_type_node, call);
4942 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
4943 gsi_remove (&gsi, true);
4944
4945 gsi = gsi_last_bb (store_bb);
4946 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
4947 gsi_remove (&gsi, true);
4948 gsi = gsi_last_bb (store_bb);
4949 gsi_remove (&gsi, true);
4950
4951 if (gimple_in_ssa_p (cfun))
4952 update_ssa (TODO_update_ssa_no_phi);
4953
4954 return true;
4955 }
4956
4957 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
4958
4959 oldval = *addr;
4960 repeat:
4961 newval = rhs; // with oldval replacing *addr in rhs
4962 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
4963 if (oldval != newval)
4964 goto repeat;
4965
4966 INDEX is log2 of the size of the data type, and thus usable to find the
4967 index of the builtin decl. */
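
/* An assumed example: for

     #pragma omp atomic
       f *= 2.0f;

   on a 4-byte float f there is no fetch-and-op builtin, so the value is
   view-converted to a same-sized integer and the loop above keeps
   retrying __sync_val_compare_and_swap until the value it returns matches
   the one we loaded, i.e. until our store actually took effect.  */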
4968
4969 static bool
4970 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
4971 tree addr, tree loaded_val, tree stored_val,
4972 int index)
4973 {
4974 tree loadedi, storedi, initial, new_storedi, old_vali;
4975 tree type, itype, cmpxchg, iaddr;
4976 gimple_stmt_iterator si;
4977 basic_block loop_header = single_succ (load_bb);
4978 gimple phi, stmt;
4979 edge e;
4980
4981 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
4982 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
4983 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
4984
4985 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
4986 return false;
4987
4988 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
4989 si = gsi_last_bb (load_bb);
4990 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
4991
4992 /* For floating-point values, we'll need to view-convert them to integers
4993 so that we can perform the atomic compare and swap. Simplify the
4994 following code by always setting up the "i"ntegral variables. */
4995 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
4996 {
4997 tree iaddr_val;
4998
4999 iaddr = create_tmp_var (build_pointer_type (itype), NULL);
5000 iaddr_val
5001 = force_gimple_operand_gsi (&si,
5002 fold_convert (TREE_TYPE (iaddr), addr),
5003 false, NULL_TREE, true, GSI_SAME_STMT);
5004 stmt = gimple_build_assign (iaddr, iaddr_val);
5005 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5006 DECL_NO_TBAA_P (iaddr) = 1;
5007 DECL_POINTER_ALIAS_SET (iaddr) = 0;
5008 loadedi = create_tmp_var (itype, NULL);
5009 if (gimple_in_ssa_p (cfun))
5010 {
5011 add_referenced_var (iaddr);
5012 add_referenced_var (loadedi);
5013 loadedi = make_ssa_name (loadedi, NULL);
5014 }
5015 }
5016 else
5017 {
5018 iaddr = addr;
5019 loadedi = loaded_val;
5020 }
5021
5022 initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
5023 true, NULL_TREE, true, GSI_SAME_STMT);
5024
5025 /* Move the value to the LOADEDI temporary. */
5026 if (gimple_in_ssa_p (cfun))
5027 {
5028 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5029 phi = create_phi_node (loadedi, loop_header);
5030 SSA_NAME_DEF_STMT (loadedi) = phi;
5031 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5032 initial);
5033 }
5034 else
5035 gsi_insert_before (&si,
5036 gimple_build_assign (loadedi, initial),
5037 GSI_SAME_STMT);
5038 if (loadedi != loaded_val)
5039 {
5040 gimple_stmt_iterator gsi2;
5041 tree x;
5042
5043 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5044 gsi2 = gsi_start_bb (loop_header);
5045 if (gimple_in_ssa_p (cfun))
5046 {
5047 gimple stmt;
5048 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5049 true, GSI_SAME_STMT);
5050 stmt = gimple_build_assign (loaded_val, x);
5051 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5052 }
5053 else
5054 {
5055 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5056 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5057 true, GSI_SAME_STMT);
5058 }
5059 }
5060 gsi_remove (&si, true);
5061
5062 si = gsi_last_bb (store_bb);
5063 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5064
5065 if (iaddr == addr)
5066 storedi = stored_val;
5067 else
5068 storedi =
5069 force_gimple_operand_gsi (&si,
5070 build1 (VIEW_CONVERT_EXPR, itype,
5071 stored_val), true, NULL_TREE, true,
5072 GSI_SAME_STMT);
5073
5074 /* Build the compare&swap statement. */
5075 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5076 new_storedi = force_gimple_operand_gsi (&si,
5077 fold_convert (itype, new_storedi),
5078 true, NULL_TREE,
5079 true, GSI_SAME_STMT);
5080
5081 if (gimple_in_ssa_p (cfun))
5082 old_vali = loadedi;
5083 else
5084 {
5085 old_vali = create_tmp_var (itype, NULL);
5086 if (gimple_in_ssa_p (cfun))
5087 add_referenced_var (old_vali);
5088 stmt = gimple_build_assign (old_vali, loadedi);
5089 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5090
5091 stmt = gimple_build_assign (loadedi, new_storedi);
5092 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5093 }
5094
5095 /* Note that we always perform the comparison as an integer, even for
5096 floating point. This allows the atomic operation to properly
5097 succeed even with NaNs and -0.0. */
5098 stmt = gimple_build_cond_empty
5099 (build2 (NE_EXPR, boolean_type_node,
5100 new_storedi, old_vali));
5101 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5102
5103 /* Update cfg. */
5104 e = single_succ_edge (store_bb);
5105 e->flags &= ~EDGE_FALLTHRU;
5106 e->flags |= EDGE_FALSE_VALUE;
5107
5108 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5109
5110 /* Copy the new value to loadedi (we already did that before the condition
5111 if we are not in SSA). */
5112 if (gimple_in_ssa_p (cfun))
5113 {
5114 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5115 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5116 }
5117
5118 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5119 gsi_remove (&si, true);
5120
5121 if (gimple_in_ssa_p (cfun))
5122 update_ssa (TODO_update_ssa_no_phi);
5123
5124 return true;
5125 }
5126
5127 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5128
5129 GOMP_atomic_start ();
5130 *addr = rhs;
5131 GOMP_atomic_end ();
5132
5133 The result is not globally atomic, but works so long as all parallel
5134 references are within #pragma omp atomic directives. According to
5135 responses received from omp@openmp.org, this appears to be within
5136 spec, which makes sense, since that's how several other compilers
5137 handle this situation as well.
5138 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5139 expanding. STORED_VAL is the operand of the matching
5140 GIMPLE_OMP_ATOMIC_STORE.
5141
5142 We replace
5143 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5144 loaded_val = *addr;
5145
5146 and replace
5147 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5148 *addr = stored_val;
5149 */
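
/* An assumed example: an access whose size has no usable compare-and-swap
   pattern ends up here, and the region is emitted as

     GOMP_atomic_start ();
     loaded_val = *addr;
     ... compute stored_val from loaded_val ...
     *addr = stored_val;
     GOMP_atomic_end ();

   which serializes it against every other '#pragma omp atomic' region.  */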
5150
5151 static bool
5152 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5153 tree addr, tree loaded_val, tree stored_val)
5154 {
5155 gimple_stmt_iterator si;
5156 gimple stmt;
5157 tree t;
5158
5159 si = gsi_last_bb (load_bb);
5160 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5161
5162 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5163 t = build_function_call_expr (t, 0);
5164 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5165
5166 stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
5167 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5168 gsi_remove (&si, true);
5169
5170 si = gsi_last_bb (store_bb);
5171 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5172
5173 stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
5174 stored_val);
5175 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5176
5177 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5178 t = build_function_call_expr (t, 0);
5179 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5180 gsi_remove (&si, true);
5181
5182 if (gimple_in_ssa_p (cfun))
5183 update_ssa (TODO_update_ssa_no_phi);
5184 return true;
5185 }
5186
5187 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
5188 using expand_omp_atomic_fetch_op. If that fails, we try to
5189 call expand_omp_atomic_pipeline, and if that fails too, the
5190 ultimate fallback is wrapping the operation in a mutex
5191 (expand_omp_atomic_mutex). REGION is the atomic region built
5192 by build_omp_regions_1(). */
5193
5194 static void
5195 expand_omp_atomic (struct omp_region *region)
5196 {
5197 basic_block load_bb = region->entry, store_bb = region->exit;
5198 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5199 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5200 tree addr = gimple_omp_atomic_load_rhs (load);
5201 tree stored_val = gimple_omp_atomic_store_val (store);
5202 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5203 HOST_WIDE_INT index;
5204
5205 /* Make sure the type is one of the supported sizes. */
5206 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5207 index = exact_log2 (index);
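/* INDEX is now log2 of the byte size; values 0 through 4 correspond to
   the 1, 2, 4, 8 and 16 byte __sync variants.  */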
5208 if (index >= 0 && index <= 4)
5209 {
5210 unsigned int align = TYPE_ALIGN_UNIT (type);
5211
5212 /* __sync builtins require strict data alignment. */
5213 if (exact_log2 (align) >= index)
5214 {
5215 /* When possible, use specialized atomic update functions. */
5216 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5217 && store_bb == single_succ (load_bb))
5218 {
5219 if (expand_omp_atomic_fetch_op (load_bb, addr,
5220 loaded_val, stored_val, index))
5221 return;
5222 }
5223
5224 /* If we don't have specialized __sync builtins, try and implement
5225 as a compare and swap loop. */
5226 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5227 loaded_val, stored_val, index))
5228 return;
5229 }
5230 }
5231
5232 /* The ultimate fallback is wrapping the operation in a mutex. */
5233 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5234 }
5235
5236
5237 /* Expand the parallel region tree rooted at REGION. Expansion
5238 proceeds in depth-first order. Innermost regions are expanded
5239 first. This way, parallel regions that require a new function to
5240 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5241 internal dependencies in their body. */
5242
5243 static void
5244 expand_omp (struct omp_region *region)
5245 {
5246 while (region)
5247 {
5248 location_t saved_location;
5249
5250 /* First, determine whether this is a combined parallel+workshare
5251 region. */
5252 if (region->type == GIMPLE_OMP_PARALLEL)
5253 determine_parallel_type (region);
5254
5255 if (region->inner)
5256 expand_omp (region->inner);
5257
5258 saved_location = input_location;
5259 if (gimple_has_location (last_stmt (region->entry)))
5260 input_location = gimple_location (last_stmt (region->entry));
5261
5262 switch (region->type)
5263 {
5264 case GIMPLE_OMP_PARALLEL:
5265 case GIMPLE_OMP_TASK:
5266 expand_omp_taskreg (region);
5267 break;
5268
5269 case GIMPLE_OMP_FOR:
5270 expand_omp_for (region);
5271 break;
5272
5273 case GIMPLE_OMP_SECTIONS:
5274 expand_omp_sections (region);
5275 break;
5276
5277 case GIMPLE_OMP_SECTION:
5278 /* Individual omp sections are handled together with their
5279 parent GIMPLE_OMP_SECTIONS region. */
5280 break;
5281
5282 case GIMPLE_OMP_SINGLE:
5283 expand_omp_single (region);
5284 break;
5285
5286 case GIMPLE_OMP_MASTER:
5287 case GIMPLE_OMP_ORDERED:
5288 case GIMPLE_OMP_CRITICAL:
5289 expand_omp_synch (region);
5290 break;
5291
5292 case GIMPLE_OMP_ATOMIC_LOAD:
5293 expand_omp_atomic (region);
5294 break;
5295
5296 default:
5297 gcc_unreachable ();
5298 }
5299
5300 input_location = saved_location;
5301 region = region->next;
5302 }
5303 }
5304
5305
5306 /* Helper for build_omp_regions. Scan the dominator tree starting at
5307 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5308 true, the function ends once a single tree is built (otherwise, a whole
5309 forest of OMP constructs may be built). */
5310
5311 static void
5312 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5313 bool single_tree)
5314 {
5315 gimple_stmt_iterator gsi;
5316 gimple stmt;
5317 basic_block son;
5318
5319 gsi = gsi_last_bb (bb);
5320 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5321 {
5322 struct omp_region *region;
5323 enum gimple_code code;
5324
5325 stmt = gsi_stmt (gsi);
5326 code = gimple_code (stmt);
5327 if (code == GIMPLE_OMP_RETURN)
5328 {
5329 /* STMT is the return point out of region PARENT. Mark it
5330 as the exit point and make PARENT the immediately
5331 enclosing region. */
5332 gcc_assert (parent);
5333 region = parent;
5334 region->exit = bb;
5335 parent = parent->outer;
5336 }
5337 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5338 {
5339 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5340 GIMPLE_OMP_RETURN, but matches with
5341 GIMPLE_OMP_ATOMIC_LOAD. */
5342 gcc_assert (parent);
5343 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5344 region = parent;
5345 region->exit = bb;
5346 parent = parent->outer;
5347 }
5348
5349 else if (code == GIMPLE_OMP_CONTINUE)
5350 {
5351 gcc_assert (parent);
5352 parent->cont = bb;
5353 }
5354 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5355 {
5356 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5357 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5358 ;
5359 }
5360 else
5361 {
5362 /* Otherwise, this directive becomes the parent for a new
5363 region. */
5364 region = new_omp_region (bb, code, parent);
5365 parent = region;
5366 }
5367 }
5368
5369 if (single_tree && !parent)
5370 return;
5371
5372 for (son = first_dom_son (CDI_DOMINATORS, bb);
5373 son;
5374 son = next_dom_son (CDI_DOMINATORS, son))
5375 build_omp_regions_1 (son, parent, single_tree);
5376 }
5377
5378 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5379 root_omp_region. */
5380
5381 static void
5382 build_omp_regions_root (basic_block root)
5383 {
5384 gcc_assert (root_omp_region == NULL);
5385 build_omp_regions_1 (root, NULL, true);
5386 gcc_assert (root_omp_region != NULL);
5387 }
5388
5389 /* Expands the omp construct (and its subconstructs) starting in HEAD. */
5390
5391 void
5392 omp_expand_local (basic_block head)
5393 {
5394 build_omp_regions_root (head);
5395 if (dump_file && (dump_flags & TDF_DETAILS))
5396 {
5397 fprintf (dump_file, "\nOMP region tree\n\n");
5398 dump_omp_region (dump_file, root_omp_region, 0);
5399 fprintf (dump_file, "\n");
5400 }
5401
5402 remove_exit_barriers (root_omp_region);
5403 expand_omp (root_omp_region);
5404
5405 free_omp_regions ();
5406 }
5407
5408 /* Scan the CFG and build a tree of OMP regions, storing the root of
5409 the OMP region tree in root_omp_region. */
5410
5411 static void
5412 build_omp_regions (void)
5413 {
5414 gcc_assert (root_omp_region == NULL);
5415 calculate_dominance_info (CDI_DOMINATORS);
5416 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5417 }
5418
5419 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5420
5421 static unsigned int
5422 execute_expand_omp (void)
5423 {
5424 build_omp_regions ();
5425
5426 if (!root_omp_region)
5427 return 0;
5428
5429 if (dump_file)
5430 {
5431 fprintf (dump_file, "\nOMP region tree\n\n");
5432 dump_omp_region (dump_file, root_omp_region, 0);
5433 fprintf (dump_file, "\n");
5434 }
5435
5436 remove_exit_barriers (root_omp_region);
5437
5438 expand_omp (root_omp_region);
5439
5440 cleanup_tree_cfg ();
5441
5442 free_omp_regions ();
5443
5444 return 0;
5445 }
5446
5447 /* OMP expansion -- the default pass, run before creation of SSA form. */
5448
5449 static bool
5450 gate_expand_omp (void)
5451 {
5452 return (flag_openmp != 0 && errorcount == 0);
5453 }
5454
5455 struct gimple_opt_pass pass_expand_omp =
5456 {
5457 {
5458 GIMPLE_PASS,
5459 "ompexp", /* name */
5460 gate_expand_omp, /* gate */
5461 execute_expand_omp, /* execute */
5462 NULL, /* sub */
5463 NULL, /* next */
5464 0, /* static_pass_number */
5465 0, /* tv_id */
5466 PROP_gimple_any, /* properties_required */
5467 0, /* properties_provided */
5468 0, /* properties_destroyed */
5469 0, /* todo_flags_start */
5470 TODO_dump_func /* todo_flags_finish */
5471 }
5472 };
5473 \f
5474 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5475
5476 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5477 CTX is the enclosing OMP context for the current statement. */
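
/* For illustration only (a sketch of the structure built below, not the
   exact GIMPLE): given

     #pragma omp sections
     {
       #pragma omp section
         a ();
       #pragma omp section
         b ();
     }

   each GIMPLE_OMP_SECTION body is lowered in place and terminated with a
   GIMPLE_OMP_RETURN, the last section additionally receives the
   lastprivate copy-out, and the lowered bind is preceded by a
   GIMPLE_OMP_SECTIONS_SWITCH and followed by a GIMPLE_OMP_CONTINUE on the
   artificial .section control variable.  */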
5478
5479 static void
5480 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5481 {
5482 tree block, control;
5483 gimple_stmt_iterator tgsi;
5484 unsigned i, len;
5485 gimple stmt, new_stmt, bind, t;
5486 gimple_seq ilist, dlist, olist, new_body, body;
5487 struct gimplify_ctx gctx;
5488
5489 stmt = gsi_stmt (*gsi_p);
5490
5491 push_gimplify_context (&gctx);
5492
5493 dlist = NULL;
5494 ilist = NULL;
5495 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5496 &ilist, &dlist, ctx);
5497
5498 tgsi = gsi_start (gimple_omp_body (stmt));
5499 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5500 continue;
5501
5502 tgsi = gsi_start (gimple_omp_body (stmt));
5503 body = NULL;
5504 for (i = 0; i < len; i++, gsi_next (&tgsi))
5505 {
5506 omp_context *sctx;
5507 gimple sec_start;
5508
5509 sec_start = gsi_stmt (tgsi);
5510 sctx = maybe_lookup_ctx (sec_start);
5511 gcc_assert (sctx);
5512
5513 gimple_seq_add_stmt (&body, sec_start);
5514
5515 lower_omp (gimple_omp_body (sec_start), sctx);
5516 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5517 gimple_omp_set_body (sec_start, NULL);
5518
5519 if (i == len - 1)
5520 {
5521 gimple_seq l = NULL;
5522 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5523 &l, ctx);
5524 gimple_seq_add_seq (&body, l);
5525 gimple_omp_section_set_last (sec_start);
5526 }
5527
5528 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5529 }
5530
5531 block = make_node (BLOCK);
5532 bind = gimple_build_bind (NULL, body, block);
5533
5534 olist = NULL;
5535 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5536
5537 block = make_node (BLOCK);
5538 new_stmt = gimple_build_bind (NULL, NULL, block);
5539
5540 pop_gimplify_context (new_stmt);
5541 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5542 BLOCK_VARS (block) = gimple_bind_vars (bind);
5543 if (BLOCK_VARS (block))
5544 TREE_USED (block) = 1;
5545
5546 new_body = NULL;
5547 gimple_seq_add_seq (&new_body, ilist);
5548 gimple_seq_add_stmt (&new_body, stmt);
5549 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5550 gimple_seq_add_stmt (&new_body, bind);
5551
5552 control = create_tmp_var (unsigned_type_node, ".section");
5553 t = gimple_build_omp_continue (control, control);
5554 gimple_omp_sections_set_control (stmt, control);
5555 gimple_seq_add_stmt (&new_body, t);
5556
5557 gimple_seq_add_seq (&new_body, olist);
5558 gimple_seq_add_seq (&new_body, dlist);
5559
5560 new_body = maybe_catch_exception (new_body);
5561
5562 t = gimple_build_omp_return
5563 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5564 OMP_CLAUSE_NOWAIT));
5565 gimple_seq_add_stmt (&new_body, t);
5566
5567 gimple_bind_set_body (new_stmt, new_body);
5568 gimple_omp_set_body (stmt, NULL);
5569
5570 gsi_replace (gsi_p, new_stmt, true);
5571 }
5572
5573
5574 /* A subroutine of lower_omp_single. Expand the simple form of
5575 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5576
5577 if (GOMP_single_start ())
5578 BODY;
5579 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5580
5581 FIXME. It may be better to delay expanding the logic of this until
5582 pass_expand_omp. The expanded logic may make the job of a
5583 synchronization analysis pass more difficult. */
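
/* For illustration only (user-level view, not the exact GIMPLE): a plain

     #pragma omp single
       x = f ();

   goes through this routine; only the thread for which
   GOMP_single_start () returns true executes the body, and the
   GIMPLE_OMP_RETURN added by lower_omp_single carries the nowait flag,
   so the barrier is only implied when nowait is absent.  X and f are
   placeholders for any assignment in the body.  */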
5584
5585 static void
5586 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5587 {
5588 tree tlabel = create_artificial_label ();
5589 tree flabel = create_artificial_label ();
5590 gimple call, cond;
5591 tree lhs, decl;
5592
5593 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5594 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5595 call = gimple_build_call (decl, 0);
5596 gimple_call_set_lhs (call, lhs);
5597 gimple_seq_add_stmt (pre_p, call);
5598
5599 cond = gimple_build_cond (EQ_EXPR, lhs,
5600 fold_convert (TREE_TYPE (lhs), boolean_true_node),
5601 tlabel, flabel);
5602 gimple_seq_add_stmt (pre_p, cond);
5603 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5604 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5605 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5606 }
5607
5608
5609 /* A subroutine of lower_omp_single. Expand the simple form of
5610 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
5611
5612 #pragma omp single copyprivate (a, b, c)
5613
5614 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5615
5616 {
5617 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5618 {
5619 BODY;
5620 copyout.a = a;
5621 copyout.b = b;
5622 copyout.c = c;
5623 GOMP_single_copy_end (&copyout);
5624 }
5625 else
5626 {
5627 a = copyout_p->a;
5628 b = copyout_p->b;
5629 c = copyout_p->c;
5630 }
5631 GOMP_barrier ();
5632 }
5633
5634 FIXME. It may be better to delay expanding the logic of this until
5635 pass_expand_omp. The expanded logic may make the job of a
5636 synchronization analysis pass more difficult. */
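
/* For illustration only: with

     int a;
     #pragma omp single copyprivate (a)
       a = compute ();

   the thread that executes the body stores its A into the copyout record
   and signals GOMP_single_copy_end; every other thread receives a pointer
   to that record from GOMP_single_copy_start and reads A back, so all
   threads leave the construct with the same value.  compute () is just a
   placeholder for any assignment.  */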
5637
5638 static void
5639 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5640 {
5641 tree ptr_type, t, l0, l1, l2;
5642 gimple_seq copyin_seq;
5643
5644 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5645
5646 ptr_type = build_pointer_type (ctx->record_type);
5647 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5648
5649 l0 = create_artificial_label ();
5650 l1 = create_artificial_label ();
5651 l2 = create_artificial_label ();
5652
5653 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5654 t = fold_convert (ptr_type, t);
5655 gimplify_assign (ctx->receiver_decl, t, pre_p);
5656
5657 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5658 build_int_cst (ptr_type, 0));
5659 t = build3 (COND_EXPR, void_type_node, t,
5660 build_and_jump (&l0), build_and_jump (&l1));
5661 gimplify_and_add (t, pre_p);
5662
5663 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5664
5665 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5666
5667 copyin_seq = NULL;
5668 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5669 &copyin_seq, ctx);
5670
5671 t = build_fold_addr_expr (ctx->sender_decl);
5672 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END], 1, t);
5673 gimplify_and_add (t, pre_p);
5674
5675 t = build_and_jump (&l2);
5676 gimplify_and_add (t, pre_p);
5677
5678 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5679
5680 gimple_seq_add_seq (pre_p, copyin_seq);
5681
5682 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5683 }
5684
5685
5686 /* Expand code for an OpenMP single directive. */
5687
5688 static void
5689 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5690 {
5691 tree block;
5692 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5693 gimple_seq bind_body, dlist;
5694 struct gimplify_ctx gctx;
5695
5696 push_gimplify_context (&gctx);
5697
5698 bind_body = NULL;
5699 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5700 &bind_body, &dlist, ctx);
5701 lower_omp (gimple_omp_body (single_stmt), ctx);
5702
5703 gimple_seq_add_stmt (&bind_body, single_stmt);
5704
5705 if (ctx->record_type)
5706 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5707 else
5708 lower_omp_single_simple (single_stmt, &bind_body);
5709
5710 gimple_omp_set_body (single_stmt, NULL);
5711
5712 gimple_seq_add_seq (&bind_body, dlist);
5713
5714 bind_body = maybe_catch_exception (bind_body);
5715
5716 t = gimple_build_omp_return
5717 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5718 OMP_CLAUSE_NOWAIT));
5719 gimple_seq_add_stmt (&bind_body, t);
5720
5721 block = make_node (BLOCK);
5722 bind = gimple_build_bind (NULL, bind_body, block);
5723
5724 pop_gimplify_context (bind);
5725
5726 gimple_bind_append_vars (bind, ctx->block_vars);
5727 BLOCK_VARS (block) = ctx->block_vars;
5728 gsi_replace (gsi_p, bind, true);
5729 if (BLOCK_VARS (block))
5730 TREE_USED (block) = 1;
5731 }
5732
5733
5734 /* Expand code for an OpenMP master directive. */
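
/* A sketch of the lowered form (illustrative; the actual GIMPLE uses a
   conditional jump around the body rather than an if statement):

     if (omp_get_thread_num () != 0) goto skip;
     BODY;
   skip:
     GIMPLE_OMP_RETURN (nowait);

   i.e. only the master thread runs the body and there is no implied
   barrier at the end.  */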
5735
5736 static void
5737 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5738 {
5739 tree block, lab = NULL, x;
5740 gimple stmt = gsi_stmt (*gsi_p), bind;
5741 gimple_seq tseq;
5742 struct gimplify_ctx gctx;
5743
5744 push_gimplify_context (&gctx);
5745
5746 block = make_node (BLOCK);
5747 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5748 block);
5749
5750 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5751 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5752 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5753 tseq = NULL;
5754 gimplify_and_add (x, &tseq);
5755 gimple_bind_add_seq (bind, tseq);
5756
5757 lower_omp (gimple_omp_body (stmt), ctx);
5758 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5759 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5760 gimple_omp_set_body (stmt, NULL);
5761
5762 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5763
5764 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5765
5766 pop_gimplify_context (bind);
5767
5768 gimple_bind_append_vars (bind, ctx->block_vars);
5769 BLOCK_VARS (block) = ctx->block_vars;
5770 gsi_replace (gsi_p, bind, true);
5771 }
5772
5773
5774 /* Expand code for an OpenMP ordered directive. */
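
/* A sketch of the lowered form (illustrative only):

     GOMP_ordered_start ();
     BODY;
     GOMP_ordered_end ();
     GIMPLE_OMP_RETURN (nowait);

   The runtime calls serialize the bodies in loop-iteration order within
   the enclosing ordered GIMPLE_OMP_FOR.  */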
5775
5776 static void
5777 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5778 {
5779 tree block;
5780 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5781 struct gimplify_ctx gctx;
5782
5783 push_gimplify_context (&gctx);
5784
5785 block = make_node (BLOCK);
5786 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5787 block);
5788
5789 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5790 gimple_bind_add_stmt (bind, x);
5791
5792 lower_omp (gimple_omp_body (stmt), ctx);
5793 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5794 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5795 gimple_omp_set_body (stmt, NULL);
5796
5797 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5798 gimple_bind_add_stmt (bind, x);
5799
5800 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5801
5802 pop_gimplify_context (bind);
5803
5804 gimple_bind_append_vars (bind, ctx->block_vars);
5805 BLOCK_VARS (block) = gimple_bind_vars (bind);
5806 gsi_replace (gsi_p, bind, true);
5807 }
5808
5809
5810 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
5811 substitution of a couple of function calls, but the NAMED case requires
5812 that the languages coordinate on a symbol name. It is therefore
5813 best put here in common code. */
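
/* For illustration only (a sketch of the generated calls, not the exact
   GIMPLE):

     #pragma omp critical (update)
       counter++;

   becomes roughly

     GOMP_critical_name_start (&.gomp_critical_user_update);
     counter++;
     GOMP_critical_name_end (&.gomp_critical_user_update);

   where the mutex is a common symbol so every translation unit using the
   same name shares one lock; an unnamed critical uses
   GOMP_critical_start/end instead.  update and counter are example
   names.  */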
5814
5815 static GTY((param1_is (tree), param2_is (tree)))
5816 splay_tree critical_name_mutexes;
5817
5818 static void
5819 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5820 {
5821 tree block;
5822 tree name, lock, unlock;
5823 gimple stmt = gsi_stmt (*gsi_p), bind;
5824 gimple_seq tbody;
5825 struct gimplify_ctx gctx;
5826
5827 name = gimple_omp_critical_name (stmt);
5828 if (name)
5829 {
5830 tree decl;
5831 splay_tree_node n;
5832
5833 if (!critical_name_mutexes)
5834 critical_name_mutexes
5835 = splay_tree_new_ggc (splay_tree_compare_pointers);
5836
5837 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5838 if (n == NULL)
5839 {
5840 char *new_str;
5841
5842 decl = create_tmp_var_raw (ptr_type_node, NULL);
5843
5844 new_str = ACONCAT ((".gomp_critical_user_",
5845 IDENTIFIER_POINTER (name), NULL));
5846 DECL_NAME (decl) = get_identifier (new_str);
5847 TREE_PUBLIC (decl) = 1;
5848 TREE_STATIC (decl) = 1;
5849 DECL_COMMON (decl) = 1;
5850 DECL_ARTIFICIAL (decl) = 1;
5851 DECL_IGNORED_P (decl) = 1;
5852 varpool_finalize_decl (decl);
5853
5854 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5855 (splay_tree_value) decl);
5856 }
5857 else
5858 decl = (tree) n->value;
5859
5860 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5861 lock = build_call_expr (lock, 1, build_fold_addr_expr (decl));
5862
5863 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5864 unlock = build_call_expr (unlock, 1, build_fold_addr_expr (decl));
5865 }
5866 else
5867 {
5868 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5869 lock = build_call_expr (lock, 0);
5870
5871 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5872 unlock = build_call_expr (unlock, 0);
5873 }
5874
5875 push_gimplify_context (&gctx);
5876
5877 block = make_node (BLOCK);
5878 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5879
5880 tbody = gimple_bind_body (bind);
5881 gimplify_and_add (lock, &tbody);
5882 gimple_bind_set_body (bind, tbody);
5883
5884 lower_omp (gimple_omp_body (stmt), ctx);
5885 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5886 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5887 gimple_omp_set_body (stmt, NULL);
5888
5889 tbody = gimple_bind_body (bind);
5890 gimplify_and_add (unlock, &tbody);
5891 gimple_bind_set_body (bind, tbody);
5892
5893 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5894
5895 pop_gimplify_context (bind);
5896 gimple_bind_append_vars (bind, ctx->block_vars);
5897 BLOCK_VARS (block) = gimple_bind_vars (bind);
5898 gsi_replace (gsi_p, bind, true);
5899 }
5900
5901
5902 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5903 for a lastprivate clause. Given a loop control predicate of (V
5904 cond N2), we gate the clause on (!(V cond N2)). The lowered form
5905 is appended to *DLIST, iterator initialization is appended to
5906 *BODY_P. */
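
/* For example (illustrative only): for

     #pragma omp for lastprivate (x)
     for (i = 0; i < n; i++)
       x = f (i);

   the loop predicate is (i < n) and the step is 1, so the copy-out below
   is gated on the iterator having reached its final value (i == n in this
   sketch): only the thread that executed the logically last iteration
   writes its private X back to the original X.  */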
5907
5908 static void
5909 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5910 gimple_seq *dlist, struct omp_context *ctx)
5911 {
5912 tree clauses, cond, vinit;
5913 enum tree_code cond_code;
5914 gimple_seq stmts;
5915
5916 cond_code = fd->loop.cond_code;
5917 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5918
5919 /* When possible, use a strict equality expression. This can let VRP-style
5920 optimizations deduce the value and remove a copy. */
5921 if (host_integerp (fd->loop.step, 0))
5922 {
5923 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
5924 if (step == 1 || step == -1)
5925 cond_code = EQ_EXPR;
5926 }
5927
5928 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
5929
5930 clauses = gimple_omp_for_clauses (fd->for_stmt);
5931 stmts = NULL;
5932 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
5933 if (!gimple_seq_empty_p (stmts))
5934 {
5935 gimple_seq_add_seq (&stmts, *dlist);
5936 *dlist = stmts;
5937
5938 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
5939 vinit = fd->loop.n1;
5940 if (cond_code == EQ_EXPR
5941 && host_integerp (fd->loop.n2, 0)
5942 && ! integer_zerop (fd->loop.n2))
5943 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
5944
5945 /* Initialize the iterator variable, so that threads that don't execute
5946 any iterations don't execute the lastprivate clauses by accident. */
5947 gimplify_assign (fd->loop.v, vinit, body_p);
5948 }
5949 }
5950
5951
5952 /* Lower code for an OpenMP loop directive. */
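
/* A sketch of the sequence assembled below (illustrative ordering only):

     input clauses and lowered pre-body
     GIMPLE_OMP_FOR
     lowered loop body
     GIMPLE_OMP_CONTINUE (V, V)
     reduction clauses, lastprivate/dtor list
     GIMPLE_OMP_RETURN (nowait?)

   all wrapped in a new GIMPLE_BIND that also receives the temporaries
   declared in the original loop body.  */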
5953
5954 static void
5955 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5956 {
5957 tree *rhs_p, block;
5958 struct omp_for_data fd;
5959 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
5960 gimple_seq omp_for_body, body, dlist, ilist;
5961 size_t i;
5962 struct gimplify_ctx gctx;
5963
5964 push_gimplify_context (&gctx);
5965
5966 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
5967 lower_omp (gimple_omp_body (stmt), ctx);
5968
5969 block = make_node (BLOCK);
5970 new_stmt = gimple_build_bind (NULL, NULL, block);
5971
5972 /* Move the declarations of temporaries in the loop body out before
5973 we make the body go away. */
5974 omp_for_body = gimple_omp_body (stmt);
5975 if (!gimple_seq_empty_p (omp_for_body)
5976 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
5977 {
5978 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
5979 gimple_bind_append_vars (new_stmt, vars);
5980 }
5981
5982 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
5983 ilist = NULL;
5984 dlist = NULL;
5985 body = NULL;
5986 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
5987 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
5988
5989 /* Lower the header expressions. At this point, we can assume that
5990 the header is of the form:
5991
5992 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
5993
5994 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
5995 using the .omp_data_s mapping, if needed. */
5996 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
5997 {
5998 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
5999 if (!is_gimple_min_invariant (*rhs_p))
6000 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6001
6002 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6003 if (!is_gimple_min_invariant (*rhs_p))
6004 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6005
6006 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6007 if (!is_gimple_min_invariant (*rhs_p))
6008 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6009 }
6010
6011 /* Once lowered, extract the bounds and clauses. */
6012 extract_omp_for_data (stmt, &fd, NULL);
6013
6014 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6015
6016 gimple_seq_add_stmt (&body, stmt);
6017 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6018
6019 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6020 fd.loop.v));
6021
6022 /* After the loop, add exit clauses. */
6023 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6024 gimple_seq_add_seq (&body, dlist);
6025
6026 body = maybe_catch_exception (body);
6027
6028 /* Region exit marker goes at the end of the loop body. */
6029 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6030
6031 pop_gimplify_context (new_stmt);
6032
6033 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6034 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6035 if (BLOCK_VARS (block))
6036 TREE_USED (block) = 1;
6037
6038 gimple_bind_set_body (new_stmt, body);
6039 gimple_omp_set_body (stmt, NULL);
6040 gimple_omp_for_set_pre_body (stmt, NULL);
6041 gsi_replace (gsi_p, new_stmt, true);
6042 }
6043
6044 /* Callback for walk_gimple_seq. Used to detect whether a parallel body
6045 consists of a single GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS workshare. */
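
/* For illustration: in

     #pragma omp parallel
     #pragma omp for
     for (i = 0; i < n; i++)
       body (i);

   the parallel body consists of a single workshare, so the walk leaves
   *INFO at 1 and lower_omp_taskreg marks the parallel as combined; any
   other statement in the body forces *INFO to -1.  body (i) stands for
   an arbitrary loop body.  */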
6046
6047 static tree
6048 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6049 bool *handled_ops_p,
6050 struct walk_stmt_info *wi)
6051 {
6052 int *info = (int *) wi->info;
6053 gimple stmt = gsi_stmt (*gsi_p);
6054
6055 *handled_ops_p = true;
6056 switch (gimple_code (stmt))
6057 {
6058 WALK_SUBSTMTS;
6059
6060 case GIMPLE_OMP_FOR:
6061 case GIMPLE_OMP_SECTIONS:
6062 *info = *info == 0 ? 1 : -1;
6063 break;
6064 default:
6065 *info = -1;
6066 break;
6067 }
6068 return NULL;
6069 }
6070
6071 struct omp_taskcopy_context
6072 {
6073 /* This field must be at the beginning, as we do "inheritance": Some
6074 callback functions for tree-inline.c (e.g., omp_copy_decl)
6075 receive a copy_body_data pointer that is up-casted to an
6076 omp_context pointer. */
6077 copy_body_data cb;
6078 omp_context *ctx;
6079 };
6080
6081 static tree
6082 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6083 {
6084 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6085
6086 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6087 return create_tmp_var (TREE_TYPE (var), NULL);
6088
6089 return var;
6090 }
6091
6092 static tree
6093 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6094 {
6095 tree name, new_fields = NULL, type, f;
6096
6097 type = lang_hooks.types.make_type (RECORD_TYPE);
6098 name = DECL_NAME (TYPE_NAME (orig_type));
6099 name = build_decl (TYPE_DECL, name, type);
6100 TYPE_NAME (type) = name;
6101
6102 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6103 {
6104 tree new_f = copy_node (f);
6105 DECL_CONTEXT (new_f) = type;
6106 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6107 TREE_CHAIN (new_f) = new_fields;
6108 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6109 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6110 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6111 &tcctx->cb, NULL);
6112 new_fields = new_f;
6113 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6114 }
6115 TYPE_FIELDS (type) = nreverse (new_fields);
6116 layout_type (type);
6117 return type;
6118 }
6119
6120 /* Create the task copy function, which initializes the task's data record from the sender's record. */
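
/* For illustration only (a sketch of what the generated helper does):
   given

     #pragma omp task firstprivate (a)

   the copyfn receives a pointer to the task's data record (ARG) and a
   pointer to the sender's record (SARG) and performs, roughly,

     arg->a = copy-construct (sarg->a);

   using lang_hooks.decls.omp_clause_copy_ctor, with extra passes below
   for shared variable pointers and variable-sized firstprivate
   variables.  */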
6121
6122 static void
6123 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6124 {
6125 struct function *child_cfun;
6126 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6127 tree record_type, srecord_type, bind, list;
6128 bool record_needs_remap = false, srecord_needs_remap = false;
6129 splay_tree_node n;
6130 struct omp_taskcopy_context tcctx;
6131 struct gimplify_ctx gctx;
6132
6133 child_fn = gimple_omp_task_copy_fn (task_stmt);
6134 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6135 gcc_assert (child_cfun->cfg == NULL);
6136 child_cfun->dont_save_pending_sizes_p = 1;
6137 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6138
6139 /* Reset DECL_CONTEXT on function arguments. */
6140 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6141 DECL_CONTEXT (t) = child_fn;
6142
6143 /* Populate the function. */
6144 push_gimplify_context (&gctx);
6145 current_function_decl = child_fn;
6146
6147 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6148 TREE_SIDE_EFFECTS (bind) = 1;
6149 list = NULL;
6150 DECL_SAVED_TREE (child_fn) = bind;
6151 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6152
6153 /* Remap src and dst argument types if needed. */
6154 record_type = ctx->record_type;
6155 srecord_type = ctx->srecord_type;
6156 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6157 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6158 {
6159 record_needs_remap = true;
6160 break;
6161 }
6162 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6163 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6164 {
6165 srecord_needs_remap = true;
6166 break;
6167 }
6168
6169 if (record_needs_remap || srecord_needs_remap)
6170 {
6171 memset (&tcctx, '\0', sizeof (tcctx));
6172 tcctx.cb.src_fn = ctx->cb.src_fn;
6173 tcctx.cb.dst_fn = child_fn;
6174 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6175 tcctx.cb.dst_node = tcctx.cb.src_node;
6176 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6177 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6178 tcctx.cb.eh_region = -1;
6179 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6180 tcctx.cb.decl_map = pointer_map_create ();
6181 tcctx.ctx = ctx;
6182
6183 if (record_needs_remap)
6184 record_type = task_copyfn_remap_type (&tcctx, record_type);
6185 if (srecord_needs_remap)
6186 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6187 }
6188 else
6189 tcctx.cb.decl_map = NULL;
6190
6191 push_cfun (child_cfun);
6192
6193 arg = DECL_ARGUMENTS (child_fn);
6194 TREE_TYPE (arg) = build_pointer_type (record_type);
6195 sarg = TREE_CHAIN (arg);
6196 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6197
6198 /* First pass: initialize temporaries used in record_type and srecord_type
6199 sizes and field offsets. */
6200 if (tcctx.cb.decl_map)
6201 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6202 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6203 {
6204 tree *p;
6205
6206 decl = OMP_CLAUSE_DECL (c);
6207 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6208 if (p == NULL)
6209 continue;
6210 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6211 sf = (tree) n->value;
6212 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6213 src = build_fold_indirect_ref (sarg);
6214 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6215 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6216 append_to_statement_list (t, &list);
6217 }
6218
6219 /* Second pass: copy shared var pointers and copy-construct non-VLA
6220 firstprivate vars. */
6221 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6222 switch (OMP_CLAUSE_CODE (c))
6223 {
6224 case OMP_CLAUSE_SHARED:
6225 decl = OMP_CLAUSE_DECL (c);
6226 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6227 if (n == NULL)
6228 break;
6229 f = (tree) n->value;
6230 if (tcctx.cb.decl_map)
6231 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6232 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6233 sf = (tree) n->value;
6234 if (tcctx.cb.decl_map)
6235 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6236 src = build_fold_indirect_ref (sarg);
6237 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6238 dst = build_fold_indirect_ref (arg);
6239 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6240 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6241 append_to_statement_list (t, &list);
6242 break;
6243 case OMP_CLAUSE_FIRSTPRIVATE:
6244 decl = OMP_CLAUSE_DECL (c);
6245 if (is_variable_sized (decl))
6246 break;
6247 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6248 if (n == NULL)
6249 break;
6250 f = (tree) n->value;
6251 if (tcctx.cb.decl_map)
6252 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6253 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6254 if (n != NULL)
6255 {
6256 sf = (tree) n->value;
6257 if (tcctx.cb.decl_map)
6258 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6259 src = build_fold_indirect_ref (sarg);
6260 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6261 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6262 src = build_fold_indirect_ref (src);
6263 }
6264 else
6265 src = decl;
6266 dst = build_fold_indirect_ref (arg);
6267 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6268 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6269 append_to_statement_list (t, &list);
6270 break;
6271 case OMP_CLAUSE_PRIVATE:
6272 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6273 break;
6274 decl = OMP_CLAUSE_DECL (c);
6275 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6276 f = (tree) n->value;
6277 if (tcctx.cb.decl_map)
6278 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6279 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6280 if (n != NULL)
6281 {
6282 sf = (tree) n->value;
6283 if (tcctx.cb.decl_map)
6284 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6285 src = build_fold_indirect_ref (sarg);
6286 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6287 if (use_pointer_for_field (decl, NULL))
6288 src = build_fold_indirect_ref (src);
6289 }
6290 else
6291 src = decl;
6292 dst = build_fold_indirect_ref (arg);
6293 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6294 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6295 append_to_statement_list (t, &list);
6296 break;
6297 default:
6298 break;
6299 }
6300
6301 /* Last pass: handle VLA firstprivates. */
6302 if (tcctx.cb.decl_map)
6303 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6304 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6305 {
6306 tree ind, ptr, df;
6307
6308 decl = OMP_CLAUSE_DECL (c);
6309 if (!is_variable_sized (decl))
6310 continue;
6311 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6312 if (n == NULL)
6313 continue;
6314 f = (tree) n->value;
6315 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6316 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6317 ind = DECL_VALUE_EXPR (decl);
6318 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6319 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6320 n = splay_tree_lookup (ctx->sfield_map,
6321 (splay_tree_key) TREE_OPERAND (ind, 0));
6322 sf = (tree) n->value;
6323 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6324 src = build_fold_indirect_ref (sarg);
6325 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6326 src = build_fold_indirect_ref (src);
6327 dst = build_fold_indirect_ref (arg);
6328 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6329 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6330 append_to_statement_list (t, &list);
6331 n = splay_tree_lookup (ctx->field_map,
6332 (splay_tree_key) TREE_OPERAND (ind, 0));
6333 df = (tree) n->value;
6334 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6335 ptr = build_fold_indirect_ref (arg);
6336 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6337 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6338 build_fold_addr_expr (dst));
6339 append_to_statement_list (t, &list);
6340 }
6341
6342 t = build1 (RETURN_EXPR, void_type_node, NULL);
6343 append_to_statement_list (t, &list);
6344
6345 if (tcctx.cb.decl_map)
6346 pointer_map_destroy (tcctx.cb.decl_map);
6347 pop_gimplify_context (NULL);
6348 BIND_EXPR_BODY (bind) = list;
6349 pop_cfun ();
6350 current_function_decl = ctx->cb.src_fn;
6351 }
6352
6353 /* Lower the OpenMP parallel or task directive in the current statement
6354 in GSI_P. CTX holds context information for the directive. */
6355
6356 static void
6357 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6358 {
6359 tree clauses;
6360 tree child_fn, t;
6361 gimple stmt = gsi_stmt (*gsi_p);
6362 gimple par_bind, bind;
6363 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6364 struct gimplify_ctx gctx;
6365
6366 clauses = gimple_omp_taskreg_clauses (stmt);
6367 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6368 par_body = gimple_bind_body (par_bind);
6369 child_fn = ctx->cb.dst_fn;
6370 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6371 && !gimple_omp_parallel_combined_p (stmt))
6372 {
6373 struct walk_stmt_info wi;
6374 int ws_num = 0;
6375
6376 memset (&wi, 0, sizeof (wi));
6377 wi.info = &ws_num;
6378 wi.val_only = true;
6379 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6380 if (ws_num == 1)
6381 gimple_omp_parallel_set_combined_p (stmt, true);
6382 }
6383 if (ctx->srecord_type)
6384 create_task_copyfn (stmt, ctx);
6385
6386 push_gimplify_context (&gctx);
6387
6388 par_olist = NULL;
6389 par_ilist = NULL;
6390 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6391 lower_omp (par_body, ctx);
6392 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6393 lower_reduction_clauses (clauses, &par_olist, ctx);
6394
6395 /* Declare all the variables created by mapping and the variables
6396 declared in the scope of the parallel body. */
6397 record_vars_into (ctx->block_vars, child_fn);
6398 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6399
6400 if (ctx->record_type)
6401 {
6402 ctx->sender_decl
6403 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6404 : ctx->record_type, ".omp_data_o");
6405 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6406 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6407 }
6408
6409 olist = NULL;
6410 ilist = NULL;
6411 lower_send_clauses (clauses, &ilist, &olist, ctx);
6412 lower_send_shared_vars (&ilist, &olist, ctx);
6413
6414 /* Once all the expansions are done, sequence all the different
6415 fragments inside gimple_omp_body. */
6416
6417 new_body = NULL;
6418
6419 if (ctx->record_type)
6420 {
6421 t = build_fold_addr_expr (ctx->sender_decl);
6422 /* fixup_child_record_type might have changed receiver_decl's type. */
6423 t = fold_convert (TREE_TYPE (ctx->receiver_decl), t);
6424 gimple_seq_add_stmt (&new_body,
6425 gimple_build_assign (ctx->receiver_decl, t));
6426 }
6427
6428 gimple_seq_add_seq (&new_body, par_ilist);
6429 gimple_seq_add_seq (&new_body, par_body);
6430 gimple_seq_add_seq (&new_body, par_olist);
6431 new_body = maybe_catch_exception (new_body);
6432 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6433 gimple_omp_set_body (stmt, new_body);
6434
6435 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6436 gimple_bind_add_stmt (bind, stmt);
6437 if (ilist || olist)
6438 {
6439 gimple_seq_add_stmt (&ilist, bind);
6440 gimple_seq_add_seq (&ilist, olist);
6441 bind = gimple_build_bind (NULL, ilist, NULL);
6442 }
6443
6444 gsi_replace (gsi_p, bind, true);
6445
6446 pop_gimplify_context (NULL);
6447 }
6448
6449 /* Callback for lower_omp_1. Return non-NULL if *TP needs to be
6450 regimplified. If DATA is non-NULL, lower_omp_1 is being called outside
6451 of an OpenMP context, but with task_shared_vars set. */
6452
6453 static tree
6454 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6455 void *data)
6456 {
6457 tree t = *tp;
6458
6459 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6460 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6461 return t;
6462
6463 if (task_shared_vars
6464 && DECL_P (t)
6465 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6466 return t;
6467
6468 /* If a global variable has been privatized, TREE_CONSTANT on
6469 ADDR_EXPR might be wrong. */
6470 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6471 recompute_tree_invariant_for_addr_expr (t);
6472
6473 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6474 return NULL_TREE;
6475 }
6476
6477 static void
6478 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6479 {
6480 gimple stmt = gsi_stmt (*gsi_p);
6481 struct walk_stmt_info wi;
6482
6483 if (gimple_has_location (stmt))
6484 input_location = gimple_location (stmt);
6485
6486 if (task_shared_vars)
6487 memset (&wi, '\0', sizeof (wi));
6488
6489 /* If we have issued syntax errors, avoid doing any heavy lifting.
6490 Just replace the OpenMP directives with a NOP to avoid
6491 confusing RTL expansion. */
6492 if (errorcount && is_gimple_omp (stmt))
6493 {
6494 gsi_replace (gsi_p, gimple_build_nop (), true);
6495 return;
6496 }
6497
6498 switch (gimple_code (stmt))
6499 {
6500 case GIMPLE_COND:
6501 if ((ctx || task_shared_vars)
6502 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6503 ctx ? NULL : &wi, NULL)
6504 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6505 ctx ? NULL : &wi, NULL)))
6506 gimple_regimplify_operands (stmt, gsi_p);
6507 break;
6508 case GIMPLE_CATCH:
6509 lower_omp (gimple_catch_handler (stmt), ctx);
6510 break;
6511 case GIMPLE_EH_FILTER:
6512 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6513 break;
6514 case GIMPLE_TRY:
6515 lower_omp (gimple_try_eval (stmt), ctx);
6516 lower_omp (gimple_try_cleanup (stmt), ctx);
6517 break;
6518 case GIMPLE_BIND:
6519 lower_omp (gimple_bind_body (stmt), ctx);
6520 break;
6521 case GIMPLE_OMP_PARALLEL:
6522 case GIMPLE_OMP_TASK:
6523 ctx = maybe_lookup_ctx (stmt);
6524 lower_omp_taskreg (gsi_p, ctx);
6525 break;
6526 case GIMPLE_OMP_FOR:
6527 ctx = maybe_lookup_ctx (stmt);
6528 gcc_assert (ctx);
6529 lower_omp_for (gsi_p, ctx);
6530 break;
6531 case GIMPLE_OMP_SECTIONS:
6532 ctx = maybe_lookup_ctx (stmt);
6533 gcc_assert (ctx);
6534 lower_omp_sections (gsi_p, ctx);
6535 break;
6536 case GIMPLE_OMP_SINGLE:
6537 ctx = maybe_lookup_ctx (stmt);
6538 gcc_assert (ctx);
6539 lower_omp_single (gsi_p, ctx);
6540 break;
6541 case GIMPLE_OMP_MASTER:
6542 ctx = maybe_lookup_ctx (stmt);
6543 gcc_assert (ctx);
6544 lower_omp_master (gsi_p, ctx);
6545 break;
6546 case GIMPLE_OMP_ORDERED:
6547 ctx = maybe_lookup_ctx (stmt);
6548 gcc_assert (ctx);
6549 lower_omp_ordered (gsi_p, ctx);
6550 break;
6551 case GIMPLE_OMP_CRITICAL:
6552 ctx = maybe_lookup_ctx (stmt);
6553 gcc_assert (ctx);
6554 lower_omp_critical (gsi_p, ctx);
6555 break;
6556 case GIMPLE_OMP_ATOMIC_LOAD:
6557 if ((ctx || task_shared_vars)
6558 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6559 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6560 gimple_regimplify_operands (stmt, gsi_p);
6561 break;
6562 default:
6563 if ((ctx || task_shared_vars)
6564 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6565 ctx ? NULL : &wi))
6566 gimple_regimplify_operands (stmt, gsi_p);
6567 break;
6568 }
6569 }
6570
6571 static void
6572 lower_omp (gimple_seq body, omp_context *ctx)
6573 {
6574 location_t saved_location = input_location;
6575 gimple_stmt_iterator gsi;
6576 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6577 lower_omp_1 (&gsi, ctx);
6578 input_location = saved_location;
6579 }
6580 \f
6581 /* Main entry point for the OpenMP lowering pass. */
6582
6583 static unsigned int
6584 execute_lower_omp (void)
6585 {
6586 gimple_seq body;
6587
6588 /* This pass always runs, to provide PROP_gimple_lomp.
6589 But there is nothing to do unless -fopenmp is given. */
6590 if (flag_openmp == 0)
6591 return 0;
6592
6593 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6594 delete_omp_context);
6595
6596 body = gimple_body (current_function_decl);
6597 scan_omp (body, NULL);
6598 gcc_assert (taskreg_nesting_level == 0);
6599
6600 if (all_contexts->root)
6601 {
6602 struct gimplify_ctx gctx;
6603
6604 if (task_shared_vars)
6605 push_gimplify_context (&gctx);
6606 lower_omp (body, NULL);
6607 if (task_shared_vars)
6608 pop_gimplify_context (NULL);
6609 }
6610
6611 if (all_contexts)
6612 {
6613 splay_tree_delete (all_contexts);
6614 all_contexts = NULL;
6615 }
6616 BITMAP_FREE (task_shared_vars);
6617 return 0;
6618 }
6619
6620 struct gimple_opt_pass pass_lower_omp =
6621 {
6622 {
6623 GIMPLE_PASS,
6624 "omplower", /* name */
6625 NULL, /* gate */
6626 execute_lower_omp, /* execute */
6627 NULL, /* sub */
6628 NULL, /* next */
6629 0, /* static_pass_number */
6630 0, /* tv_id */
6631 PROP_gimple_any, /* properties_required */
6632 PROP_gimple_lomp, /* properties_provided */
6633 0, /* properties_destroyed */
6634 0, /* todo_flags_start */
6635 TODO_dump_func /* todo_flags_finish */
6636 }
6637 };
6638 \f
6639 /* The following is a utility to diagnose OpenMP structured block violations.
6640 It is not part of the "omplower" pass, as that's invoked too late. It
6641 should be invoked by the respective front ends after gimplification. */
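
/* For example, these walks let the front ends reject code such as

     #pragma omp parallel
       {
         goto out;
       }
   out:;

   where a branch leaves an OpenMP structured block, before the lowering
   pass ever sees it.  out is just an example label.  */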
6642
6643 static splay_tree all_labels;
6644
6645 /* Check for mismatched contexts and generate an error if needed. Return
6646 true if an error is detected. */
6647
6648 static bool
6649 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6650 gimple branch_ctx, gimple label_ctx)
6651 {
6652 if (label_ctx == branch_ctx)
6653 return false;
6654
6655
6656 /*
6657 Previously we kept track of the label's entire context in diagnose_sb_[12]
6658 so we could traverse it and issue a correct "exit" or "enter" error
6659 message upon a structured block violation.
6660
6661 We built the context by building a list with tree_cons'ing, but there is
6662 no easy counterpart in gimple tuples. It seems like far too much work
6663 for issuing exit/enter error messages. If someone really misses the
6664 distinct error message... patches welcome.
6665 */
6666
6667 #if 0
6668 /* Try to avoid confusing the user by producing an error message
6669 with correct "exit" or "enter" verbiage. We prefer "exit"
6670 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6671 if (branch_ctx == NULL)
6672 exit_p = false;
6673 else
6674 {
6675 while (label_ctx)
6676 {
6677 if (TREE_VALUE (label_ctx) == branch_ctx)
6678 {
6679 exit_p = false;
6680 break;
6681 }
6682 label_ctx = TREE_CHAIN (label_ctx);
6683 }
6684 }
6685
6686 if (exit_p)
6687 error ("invalid exit from OpenMP structured block");
6688 else
6689 error ("invalid entry to OpenMP structured block");
6690 #endif
6691
6692 /* If it's obvious we have an invalid entry, be specific about the error. */
6693 if (branch_ctx == NULL)
6694 error ("invalid entry to OpenMP structured block");
6695 else
6696 /* Otherwise, be vague and lazy, but efficient. */
6697 error ("invalid branch to/from an OpenMP structured block");
6698
6699 gsi_replace (gsi_p, gimple_build_nop (), false);
6700 return true;
6701 }
6702
6703 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6704 where each label is found. */
6705
6706 static tree
6707 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6708 struct walk_stmt_info *wi)
6709 {
6710 gimple context = (gimple) wi->info;
6711 gimple inner_context;
6712 gimple stmt = gsi_stmt (*gsi_p);
6713
6714 *handled_ops_p = true;
6715
6716 switch (gimple_code (stmt))
6717 {
6718 WALK_SUBSTMTS;
6719
6720 case GIMPLE_OMP_PARALLEL:
6721 case GIMPLE_OMP_TASK:
6722 case GIMPLE_OMP_SECTIONS:
6723 case GIMPLE_OMP_SINGLE:
6724 case GIMPLE_OMP_SECTION:
6725 case GIMPLE_OMP_MASTER:
6726 case GIMPLE_OMP_ORDERED:
6727 case GIMPLE_OMP_CRITICAL:
6728 /* The minimal context here is just the current OMP construct. */
6729 inner_context = stmt;
6730 wi->info = inner_context;
6731 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6732 wi->info = context;
6733 break;
6734
6735 case GIMPLE_OMP_FOR:
6736 inner_context = stmt;
6737 wi->info = inner_context;
6738 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6739 walk them. */
6740 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6741 diagnose_sb_1, NULL, wi);
6742 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6743 wi->info = context;
6744 break;
6745
6746 case GIMPLE_LABEL:
6747 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6748 (splay_tree_value) context);
6749 break;
6750
6751 default:
6752 break;
6753 }
6754
6755 return NULL_TREE;
6756 }
6757
6758 /* Pass 2: Check each branch and see if its context differs from the
6759 context of its destination label. */
6760
6761 static tree
6762 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6763 struct walk_stmt_info *wi)
6764 {
6765 gimple context = (gimple) wi->info;
6766 splay_tree_node n;
6767 gimple stmt = gsi_stmt (*gsi_p);
6768
6769 *handled_ops_p = true;
6770
6771 switch (gimple_code (stmt))
6772 {
6773 WALK_SUBSTMTS;
6774
6775 case GIMPLE_OMP_PARALLEL:
6776 case GIMPLE_OMP_TASK:
6777 case GIMPLE_OMP_SECTIONS:
6778 case GIMPLE_OMP_SINGLE:
6779 case GIMPLE_OMP_SECTION:
6780 case GIMPLE_OMP_MASTER:
6781 case GIMPLE_OMP_ORDERED:
6782 case GIMPLE_OMP_CRITICAL:
6783 wi->info = stmt;
6784 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6785 wi->info = context;
6786 break;
6787
6788 case GIMPLE_OMP_FOR:
6789 wi->info = stmt;
6790 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6791 walk them. */
6792 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6793 diagnose_sb_2, NULL, wi);
6794 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6795 wi->info = context;
6796 break;
6797
6798 case GIMPLE_GOTO:
6799 {
6800 tree lab = gimple_goto_dest (stmt);
6801 if (TREE_CODE (lab) != LABEL_DECL)
6802 break;
6803
6804 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6805 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
6806 }
6807 break;
6808
6809 case GIMPLE_SWITCH:
6810 {
6811 unsigned int i;
6812 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
6813 {
6814 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
6815 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6816 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
6817 break;
6818 }
6819 }
6820 break;
6821
6822 case GIMPLE_RETURN:
6823 diagnose_sb_0 (gsi_p, context, NULL);
6824 break;
6825
6826 default:
6827 break;
6828 }
6829
6830 return NULL_TREE;
6831 }
6832
6833 void
6834 diagnose_omp_structured_block_errors (tree fndecl)
6835 {
6836 tree save_current = current_function_decl;
6837 struct walk_stmt_info wi;
6838 struct function *old_cfun = cfun;
6839 gimple_seq body = gimple_body (fndecl);
6840
6841 current_function_decl = fndecl;
6842 set_cfun (DECL_STRUCT_FUNCTION (fndecl));
6843
6844 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6845
6846 memset (&wi, 0, sizeof (wi));
6847 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
6848
6849 memset (&wi, 0, sizeof (wi));
6850 wi.want_locations = true;
6851 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
6852
6853 splay_tree_delete (all_labels);
6854 all_labels = NULL;
6855
6856 set_cfun (old_cfun);
6857 current_function_decl = save_current;
6858 }
6859
6860 #include "gt-omp-low.h"