gcc/omp-low.c
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
7 Free Software Foundation, Inc.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "gimple.h"
32 #include "tree-iterator.h"
33 #include "tree-inline.h"
34 #include "langhooks.h"
35 #include "diagnostic.h"
36 #include "tree-flow.h"
37 #include "timevar.h"
38 #include "flags.h"
39 #include "function.h"
40 #include "expr.h"
41 #include "toplev.h"
42 #include "tree-pass.h"
43 #include "ggc.h"
44 #include "except.h"
45 #include "splay-tree.h"
46 #include "optabs.h"
47 #include "cfgloop.h"
48
49
50 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
51 phases. The first phase scans the function looking for OMP statements
52 and then for variables that must be replaced to satisfy data sharing
53 clauses. The second phase expands code for the constructs, as well as
54 re-gimplifying things when variables have been replaced with complex
55 expressions.
56
57 Final code generation is done by pass_expand_omp. The flowgraph is
58 scanned for parallel regions which are then moved to a new
59 function, to be invoked by the thread library. */
60
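/* For illustration only (a rough sketch, not taken from an actual dump):
   a directive such as

     #pragma omp parallel shared (i)
       body;

   in function foo is lowered and then outlined into a child function,
   roughly

     .omp_data_o.i = i;
     __builtin_GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
     foo.omp_fn.0 (&.omp_data_o);
     __builtin_GOMP_parallel_end ();

   where foo.omp_fn.0 receives the shared variables through the
   .omp_data_s record built during the lowering phase. */
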
61 /* Context structure. Used to store information about each OpenMP
62 directive encountered in the code. */
63
64 typedef struct omp_context
65 {
66 /* This field must be at the beginning, as we do "inheritance": Some
67 callback functions for tree-inline.c (e.g., omp_copy_decl)
68 receive a copy_body_data pointer that is up-casted to an
69 omp_context pointer. */
70 copy_body_data cb;
71
72 /* The tree of contexts corresponding to the encountered constructs. */
73 struct omp_context *outer;
74 gimple stmt;
75
76 /* Map variables to fields in a structure that allows communication
77 between sending and receiving threads. */
78 splay_tree field_map;
79 tree record_type;
80 tree sender_decl;
81 tree receiver_decl;
82
83 /* These are used just by task contexts, if task firstprivate fn is
84 needed. srecord_type is used to communicate from the thread
85 that encountered the task construct to task firstprivate fn;
86 record_type is allocated by GOMP_task, initialized by task firstprivate
87 fn and passed to the task body fn. */
88 splay_tree sfield_map;
89 tree srecord_type;
90
91 /* A chain of variables to add to the top-level block surrounding the
92 construct. In the case of a parallel, this is in the child function. */
93 tree block_vars;
94
95 /* What to do with variables with implicitly determined sharing
96 attributes. */
97 enum omp_clause_default_kind default_kind;
98
99 /* Nesting depth of this context. Used to beautify error messages re
100 invalid gotos. The outermost ctx is depth 1, with depth 0 being
101 reserved for the main body of the function. */
102 int depth;
103
104 /* True if this parallel directive is nested within another. */
105 bool is_nested;
106 } omp_context;
107
108
109 struct omp_for_data_loop
110 {
111 tree v, n1, n2, step;
112 enum tree_code cond_code;
113 };
114
115 /* A structure describing the main elements of a parallel loop. */
116
117 struct omp_for_data
118 {
119 struct omp_for_data_loop loop;
120 tree chunk_size;
121 gimple for_stmt;
122 tree pre, iter_type;
123 int collapse;
124 bool have_nowait, have_ordered;
125 enum omp_clause_schedule_kind sched_kind;
126 struct omp_for_data_loop *loops;
127 };
128
129
130 static splay_tree all_contexts;
131 static int taskreg_nesting_level;
132 struct omp_region *root_omp_region;
133 static bitmap task_shared_vars;
134
135 static void scan_omp (gimple_seq, omp_context *);
136 static tree scan_omp_1_op (tree *, int *, void *);
137
138 #define WALK_SUBSTMTS \
139 case GIMPLE_BIND: \
140 case GIMPLE_TRY: \
141 case GIMPLE_CATCH: \
142 case GIMPLE_EH_FILTER: \
143 /* The sub-statements for these should be walked. */ \
144 *handled_ops_p = false; \
145 break;
146
147 /* Convenience function for calling scan_omp_1_op on tree operands. */
148
149 static inline tree
150 scan_omp_op (tree *tp, omp_context *ctx)
151 {
152 struct walk_stmt_info wi;
153
154 memset (&wi, 0, sizeof (wi));
155 wi.info = ctx;
156 wi.want_locations = true;
157
158 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
159 }
160
161 static void lower_omp (gimple_seq, omp_context *);
162 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
163 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
164
165 /* Find an OpenMP clause of type KIND within CLAUSES. */
166
167 tree
168 find_omp_clause (tree clauses, enum omp_clause_code kind)
169 {
170 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
171 if (OMP_CLAUSE_CODE (clauses) == kind)
172 return clauses;
173
174 return NULL_TREE;
175 }
176
177 /* Return true if CTX is for an omp parallel. */
178
179 static inline bool
180 is_parallel_ctx (omp_context *ctx)
181 {
182 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
183 }
184
185
186 /* Return true if CTX is for an omp task. */
187
188 static inline bool
189 is_task_ctx (omp_context *ctx)
190 {
191 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
192 }
193
194
195 /* Return true if CTX is for an omp parallel or omp task. */
196
197 static inline bool
198 is_taskreg_ctx (omp_context *ctx)
199 {
200 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
201 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
202 }
203
204
205 /* Return true if REGION is a combined parallel+workshare region. */
206
207 static inline bool
208 is_combined_parallel (struct omp_region *region)
209 {
210 return region->is_combined_parallel;
211 }
212
213
214 /* Extract the header elements of parallel loop FOR_STMT and store
215 them into *FD. */
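/* For example (purely illustrative): for

     #pragma omp for schedule (dynamic, 4)
     for (i = 0; i < n; i++)
       ...

   this records loop.v == i, loop.n1 == 0, loop.n2 == n, loop.step == 1,
   loop.cond_code == LT_EXPR, sched_kind == OMP_CLAUSE_SCHEDULE_DYNAMIC
   and chunk_size == 4; LE_EXPR and GE_EXPR conditions are canonicalized
   below to LT_EXPR and GT_EXPR by adjusting N2. */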
216
217 static void
218 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
219 struct omp_for_data_loop *loops)
220 {
221 tree t, var, *collapse_iter, *collapse_count;
222 tree count = NULL_TREE, iter_type = long_integer_type_node;
223 struct omp_for_data_loop *loop;
224 int i;
225 struct omp_for_data_loop dummy_loop;
226 location_t loc = gimple_location (for_stmt);
227
228 fd->for_stmt = for_stmt;
229 fd->pre = NULL;
230 fd->collapse = gimple_omp_for_collapse (for_stmt);
231 if (fd->collapse > 1)
232 fd->loops = loops;
233 else
234 fd->loops = &fd->loop;
235
236 fd->have_nowait = fd->have_ordered = false;
237 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
238 fd->chunk_size = NULL_TREE;
239 collapse_iter = NULL;
240 collapse_count = NULL;
241
242 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
243 switch (OMP_CLAUSE_CODE (t))
244 {
245 case OMP_CLAUSE_NOWAIT:
246 fd->have_nowait = true;
247 break;
248 case OMP_CLAUSE_ORDERED:
249 fd->have_ordered = true;
250 break;
251 case OMP_CLAUSE_SCHEDULE:
252 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
253 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
254 break;
255 case OMP_CLAUSE_COLLAPSE:
256 if (fd->collapse > 1)
257 {
258 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
259 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
260 }
261 default:
262 break;
263 }
264
265 /* FIXME: for now map schedule(auto) to schedule(static).
266 There should be analysis to determine whether all iterations
267 are approximately the same amount of work (then schedule(static)
268 is best) or if it varies (then schedule(dynamic,N) is better). */
269 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
270 {
271 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
272 gcc_assert (fd->chunk_size == NULL);
273 }
274 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
275 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
276 gcc_assert (fd->chunk_size == NULL);
277 else if (fd->chunk_size == NULL)
278 {
279 /* We only need to compute a default chunk size for ordered
280 static loops and dynamic loops. */
281 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
282 || fd->have_ordered
283 || fd->collapse > 1)
284 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
285 ? integer_zero_node : integer_one_node;
286 }
287
288 for (i = 0; i < fd->collapse; i++)
289 {
290 if (fd->collapse == 1)
291 loop = &fd->loop;
292 else if (loops != NULL)
293 loop = loops + i;
294 else
295 loop = &dummy_loop;
296
297
298 loop->v = gimple_omp_for_index (for_stmt, i);
299 gcc_assert (SSA_VAR_P (loop->v));
300 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
301 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
302 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
303 loop->n1 = gimple_omp_for_initial (for_stmt, i);
304
305 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
306 loop->n2 = gimple_omp_for_final (for_stmt, i);
307 switch (loop->cond_code)
308 {
309 case LT_EXPR:
310 case GT_EXPR:
311 break;
312 case LE_EXPR:
313 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
314 loop->n2 = fold_build2_loc (loc,
315 POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
316 loop->n2, size_one_node);
317 else
318 loop->n2 = fold_build2_loc (loc,
319 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
320 build_int_cst (TREE_TYPE (loop->n2), 1));
321 loop->cond_code = LT_EXPR;
322 break;
323 case GE_EXPR:
324 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
325 loop->n2 = fold_build2_loc (loc,
326 POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
327 loop->n2, size_int (-1));
328 else
329 loop->n2 = fold_build2_loc (loc,
330 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
331 build_int_cst (TREE_TYPE (loop->n2), 1));
332 loop->cond_code = GT_EXPR;
333 break;
334 default:
335 gcc_unreachable ();
336 }
337
338 t = gimple_omp_for_incr (for_stmt, i);
339 gcc_assert (TREE_OPERAND (t, 0) == var);
340 switch (TREE_CODE (t))
341 {
342 case PLUS_EXPR:
343 case POINTER_PLUS_EXPR:
344 loop->step = TREE_OPERAND (t, 1);
345 break;
346 case MINUS_EXPR:
347 loop->step = TREE_OPERAND (t, 1);
348 loop->step = fold_build1_loc (loc,
349 NEGATE_EXPR, TREE_TYPE (loop->step),
350 loop->step);
351 break;
352 default:
353 gcc_unreachable ();
354 }
355
356 if (iter_type != long_long_unsigned_type_node)
357 {
358 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
359 iter_type = long_long_unsigned_type_node;
360 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
361 && TYPE_PRECISION (TREE_TYPE (loop->v))
362 >= TYPE_PRECISION (iter_type))
363 {
364 tree n;
365
366 if (loop->cond_code == LT_EXPR)
367 n = fold_build2_loc (loc,
368 PLUS_EXPR, TREE_TYPE (loop->v),
369 loop->n2, loop->step);
370 else
371 n = loop->n1;
372 if (TREE_CODE (n) != INTEGER_CST
373 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
374 iter_type = long_long_unsigned_type_node;
375 }
376 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
377 > TYPE_PRECISION (iter_type))
378 {
379 tree n1, n2;
380
381 if (loop->cond_code == LT_EXPR)
382 {
383 n1 = loop->n1;
384 n2 = fold_build2_loc (loc,
385 PLUS_EXPR, TREE_TYPE (loop->v),
386 loop->n2, loop->step);
387 }
388 else
389 {
390 n1 = fold_build2_loc (loc,
391 MINUS_EXPR, TREE_TYPE (loop->v),
392 loop->n2, loop->step);
393 n2 = loop->n1;
394 }
395 if (TREE_CODE (n1) != INTEGER_CST
396 || TREE_CODE (n2) != INTEGER_CST
397 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
398 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
399 iter_type = long_long_unsigned_type_node;
400 }
401 }
402
403 if (collapse_count && *collapse_count == NULL)
404 {
405 if ((i == 0 || count != NULL_TREE)
406 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
407 && TREE_CONSTANT (loop->n1)
408 && TREE_CONSTANT (loop->n2)
409 && TREE_CODE (loop->step) == INTEGER_CST)
410 {
411 tree itype = TREE_TYPE (loop->v);
412
413 if (POINTER_TYPE_P (itype))
414 itype
415 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
416 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
417 t = fold_build2_loc (loc,
418 PLUS_EXPR, itype,
419 fold_convert_loc (loc, itype, loop->step), t);
420 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
421 fold_convert_loc (loc, itype, loop->n2));
422 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
423 fold_convert_loc (loc, itype, loop->n1));
424 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
425 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
426 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
427 fold_build1_loc (loc, NEGATE_EXPR, itype,
428 fold_convert_loc (loc, itype,
429 loop->step)));
430 else
431 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
432 fold_convert_loc (loc, itype, loop->step));
433 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
434 if (count != NULL_TREE)
435 count = fold_build2_loc (loc,
436 MULT_EXPR, long_long_unsigned_type_node,
437 count, t);
438 else
439 count = t;
440 if (TREE_CODE (count) != INTEGER_CST)
441 count = NULL_TREE;
442 }
443 else
444 count = NULL_TREE;
445 }
446 }
447
448 if (count)
449 {
450 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
451 iter_type = long_long_unsigned_type_node;
452 else
453 iter_type = long_integer_type_node;
454 }
455 else if (collapse_iter && *collapse_iter != NULL)
456 iter_type = TREE_TYPE (*collapse_iter);
457 fd->iter_type = iter_type;
458 if (collapse_iter && *collapse_iter == NULL)
459 *collapse_iter = create_tmp_var (iter_type, ".iter");
460 if (collapse_count && *collapse_count == NULL)
461 {
462 if (count)
463 *collapse_count = fold_convert_loc (loc, iter_type, count);
464 else
465 *collapse_count = create_tmp_var (iter_type, ".count");
466 }
467
468 if (fd->collapse > 1)
469 {
470 fd->loop.v = *collapse_iter;
471 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
472 fd->loop.n2 = *collapse_count;
473 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
474 fd->loop.cond_code = LT_EXPR;
475 }
476 }
477
478
479 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
480 is the immediate dominator of PAR_ENTRY_BB, return true if there
481 are no data dependencies that would prevent expanding the parallel
482 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
483
484 When expanding a combined parallel+workshare region, the call to
485 the child function may need additional arguments in the case of
486 GIMPLE_OMP_FOR regions. In some cases, these arguments are
487 computed out of variables passed in from the parent to the child
488 via 'struct .omp_data_s'. For instance:
489
490 #pragma omp parallel for schedule (guided, i * 4)
491 for (j ...)
492
493 Is lowered into:
494
495 # BLOCK 2 (PAR_ENTRY_BB)
496 .omp_data_o.i = i;
497 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
498
499 # BLOCK 3 (WS_ENTRY_BB)
500 .omp_data_i = &.omp_data_o;
501 D.1667 = .omp_data_i->i;
502 D.1598 = D.1667 * 4;
503 #pragma omp for schedule (guided, D.1598)
504
505 When we outline the parallel region, the call to the child function
506 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
507 that value is computed *after* the call site. So, in principle we
508 cannot do the transformation.
509
510 To see whether the code in WS_ENTRY_BB blocks the combined
511 parallel+workshare call, we collect all the variables used in the
512 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
513 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
514 call.
515
516 FIXME. If we had the SSA form built at this point, we could merely
517 hoist the code in block 3 into block 2 and be done with it. But at
518 this point we don't have dataflow information and though we could
519 hack something up here, it is really not worth the aggravation. */
520
521 static bool
522 workshare_safe_to_combine_p (basic_block ws_entry_bb)
523 {
524 struct omp_for_data fd;
525 gimple ws_stmt = last_stmt (ws_entry_bb);
526
527 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
528 return true;
529
530 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
531
532 extract_omp_for_data (ws_stmt, &fd, NULL);
533
534 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
535 return false;
536 if (fd.iter_type != long_integer_type_node)
537 return false;
538
539 /* FIXME. We give up too easily here. If any of these arguments
540 are not constants, they will likely involve variables that have
541 been mapped into fields of .omp_data_s for sharing with the child
542 function. With appropriate data flow, it would be possible to
543 see through this. */
544 if (!is_gimple_min_invariant (fd.loop.n1)
545 || !is_gimple_min_invariant (fd.loop.n2)
546 || !is_gimple_min_invariant (fd.loop.step)
547 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
548 return false;
549
550 return true;
551 }
552
553
554 /* Collect additional arguments needed to emit a combined
555 parallel+workshare call. WS_STMT is the workshare directive being
556 expanded. */
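/* Added note: the tree_cons calls below prepend, so for a GIMPLE_OMP_FOR
   the resulting list is ordered N1, N2, STEP and, if present, CHUNK_SIZE,
   presumably the order in which the combined GOMP_parallel_loop_* entry
   points expect their extra arguments. */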
557
558 static tree
559 get_ws_args_for (gimple ws_stmt)
560 {
561 tree t;
562 location_t loc = gimple_location (ws_stmt);
563
564 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
565 {
566 struct omp_for_data fd;
567 tree ws_args;
568
569 extract_omp_for_data (ws_stmt, &fd, NULL);
570
571 ws_args = NULL_TREE;
572 if (fd.chunk_size)
573 {
574 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
575 ws_args = tree_cons (NULL, t, ws_args);
576 }
577
578 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
579 ws_args = tree_cons (NULL, t, ws_args);
580
581 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
582 ws_args = tree_cons (NULL, t, ws_args);
583
584 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
585 ws_args = tree_cons (NULL, t, ws_args);
586
587 return ws_args;
588 }
589 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
590 {
591 /* Number of sections is equal to the number of edges from the
592 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
593 the exit of the sections region. */
594 basic_block bb = single_succ (gimple_bb (ws_stmt));
595 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
596 t = tree_cons (NULL, t, NULL);
597 return t;
598 }
599
600 gcc_unreachable ();
601 }
602
603
604 /* Discover whether REGION is a combined parallel+workshare region. */
605
606 static void
607 determine_parallel_type (struct omp_region *region)
608 {
609 basic_block par_entry_bb, par_exit_bb;
610 basic_block ws_entry_bb, ws_exit_bb;
611
612 if (region == NULL || region->inner == NULL
613 || region->exit == NULL || region->inner->exit == NULL
614 || region->inner->cont == NULL)
615 return;
616
617 /* We only support parallel+for and parallel+sections. */
618 if (region->type != GIMPLE_OMP_PARALLEL
619 || (region->inner->type != GIMPLE_OMP_FOR
620 && region->inner->type != GIMPLE_OMP_SECTIONS))
621 return;
622
623 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
624 WS_EXIT_BB -> PAR_EXIT_BB. */
625 par_entry_bb = region->entry;
626 par_exit_bb = region->exit;
627 ws_entry_bb = region->inner->entry;
628 ws_exit_bb = region->inner->exit;
629
630 if (single_succ (par_entry_bb) == ws_entry_bb
631 && single_succ (ws_exit_bb) == par_exit_bb
632 && workshare_safe_to_combine_p (ws_entry_bb)
633 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
634 || (last_and_only_stmt (ws_entry_bb)
635 && last_and_only_stmt (par_exit_bb))))
636 {
637 gimple ws_stmt = last_stmt (ws_entry_bb);
638
639 if (region->inner->type == GIMPLE_OMP_FOR)
640 {
641 /* If this is a combined parallel loop, we need to determine
642 whether or not to use the combined library calls. There
643 are two cases where we do not apply the transformation:
644 static loops and any kind of ordered loop. In the first
645 case, we already open code the loop so there is no need
646 to do anything else. In the latter case, the combined
647 parallel loop call would still need extra synchronization
648 to implement ordered semantics, so there would not be any
649 gain in using the combined call. */
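/* Illustrative example (not from the sources): for
   "#pragma omp parallel for schedule (dynamic)" the schedule clause is
   present, non-static and not ordered, so the checks below leave the
   region marked as combined; for schedule (static) or any ordered loop
   the combined flags are cleared and the constructs are expanded
   separately. */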
650 tree clauses = gimple_omp_for_clauses (ws_stmt);
651 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
652 if (c == NULL
653 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
654 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
655 {
656 region->is_combined_parallel = false;
657 region->inner->is_combined_parallel = false;
658 return;
659 }
660 }
661
662 region->is_combined_parallel = true;
663 region->inner->is_combined_parallel = true;
664 region->ws_args = get_ws_args_for (ws_stmt);
665 }
666 }
667
668
669 /* Return true if EXPR is variable sized. */
670
671 static inline bool
672 is_variable_sized (const_tree expr)
673 {
674 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
675 }
676
677 /* Return true if DECL is privatized by reference. */
678
679 static inline bool
680 is_reference (tree decl)
681 {
682 return lang_hooks.decls.omp_privatize_by_reference (decl);
683 }
684
685 /* Lookup variables in the decl or field splay trees. The "maybe" form
686 allows the variable not to have been entered; otherwise we
687 assert that the variable has been entered. */
688
689 static inline tree
690 lookup_decl (tree var, omp_context *ctx)
691 {
692 tree *n;
693 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
694 return *n;
695 }
696
697 static inline tree
698 maybe_lookup_decl (const_tree var, omp_context *ctx)
699 {
700 tree *n;
701 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
702 return n ? *n : NULL_TREE;
703 }
704
705 static inline tree
706 lookup_field (tree var, omp_context *ctx)
707 {
708 splay_tree_node n;
709 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
710 return (tree) n->value;
711 }
712
713 static inline tree
714 lookup_sfield (tree var, omp_context *ctx)
715 {
716 splay_tree_node n;
717 n = splay_tree_lookup (ctx->sfield_map
718 ? ctx->sfield_map : ctx->field_map,
719 (splay_tree_key) var);
720 return (tree) n->value;
721 }
722
723 static inline tree
724 maybe_lookup_field (tree var, omp_context *ctx)
725 {
726 splay_tree_node n;
727 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
728 return n ? (tree) n->value : NULL_TREE;
729 }
730
731 /* Return true if DECL should be copied by pointer. SHARED_CTX is
732 the parallel context if DECL is to be shared. */
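/* For instance (illustrative): an aggregate such as "int a[100]" named in
   a sharing clause is always passed by pointer, whereas a non-addressable
   automatic scalar that is not visible from an enclosing context may use
   copy-in/copy-out through the communication record. */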
733
734 static bool
735 use_pointer_for_field (tree decl, omp_context *shared_ctx)
736 {
737 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
738 return true;
739
740 /* We can only use copy-in/copy-out semantics for shared variables
741 when we know the value is not accessible from an outer scope. */
742 if (shared_ctx)
743 {
744 /* ??? Trivially accessible from anywhere. But why would we even
745 be passing an address in this case? Should we simply assert
746 this to be false, or should we have a cleanup pass that removes
747 these from the list of mappings? */
748 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
749 return true;
750
751 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
752 without analyzing the expression whether or not its location
753 is accessible to anyone else. In the case of nested parallel
754 regions it certainly may be. */
755 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
756 return true;
757
758 /* Do not use copy-in/copy-out for variables that have their
759 address taken. */
760 if (TREE_ADDRESSABLE (decl))
761 return true;
762
763 /* Disallow copy-in/out in nested parallel if
764 decl is shared in outer parallel, otherwise
765 each thread could store the shared variable
766 in its own copy-in location, making the
767 variable no longer really shared. */
768 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
769 {
770 omp_context *up;
771
772 for (up = shared_ctx->outer; up; up = up->outer)
773 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
774 break;
775
776 if (up)
777 {
778 tree c;
779
780 for (c = gimple_omp_taskreg_clauses (up->stmt);
781 c; c = OMP_CLAUSE_CHAIN (c))
782 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
783 && OMP_CLAUSE_DECL (c) == decl)
784 break;
785
786 if (c)
787 return true;
788 }
789 }
790
791 /* For tasks avoid using copy-in/out, unless they are readonly
792 (in which case just copy-in is used). As tasks can be
793 deferred or executed in different thread, when GOMP_task
794 returns, the task hasn't necessarily terminated. */
795 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
796 {
797 tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
798 if (is_gimple_reg (outer))
799 {
800 /* Taking address of OUTER in lower_send_shared_vars
801 might need regimplification of everything that uses the
802 variable. */
803 if (!task_shared_vars)
804 task_shared_vars = BITMAP_ALLOC (NULL);
805 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
806 TREE_ADDRESSABLE (outer) = 1;
807 }
808 return true;
809 }
810 }
811
812 return false;
813 }
814
815 /* Create a new VAR_DECL and copy information from VAR to it. */
816
817 tree
818 copy_var_decl (tree var, tree name, tree type)
819 {
820 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
821
822 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
823 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
824 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
825 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
826 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
827 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
828 TREE_USED (copy) = 1;
829 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
830
831 return copy;
832 }
833
834 /* Construct a new automatic decl similar to VAR. */
835
836 static tree
837 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
838 {
839 tree copy = copy_var_decl (var, name, type);
840
841 DECL_CONTEXT (copy) = current_function_decl;
842 TREE_CHAIN (copy) = ctx->block_vars;
843 ctx->block_vars = copy;
844
845 return copy;
846 }
847
848 static tree
849 omp_copy_decl_1 (tree var, omp_context *ctx)
850 {
851 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
852 }
853
854 /* Build tree nodes to access the field for VAR on the receiver side. */
855
856 static tree
857 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
858 {
859 tree x, field = lookup_field (var, ctx);
860
861 /* If the receiver record type was remapped in the child function,
862 remap the field into the new record type. */
863 x = maybe_lookup_field (field, ctx);
864 if (x != NULL)
865 field = x;
866
867 x = build_fold_indirect_ref (ctx->receiver_decl);
868 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
869 if (by_ref)
870 x = build_fold_indirect_ref (x);
871
872 return x;
873 }
874
875 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
876 of a parallel, this is a component reference; for workshare constructs
877 this is some variable. */
878
879 static tree
880 build_outer_var_ref (tree var, omp_context *ctx)
881 {
882 tree x;
883
884 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
885 x = var;
886 else if (is_variable_sized (var))
887 {
888 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
889 x = build_outer_var_ref (x, ctx);
890 x = build_fold_indirect_ref (x);
891 }
892 else if (is_taskreg_ctx (ctx))
893 {
894 bool by_ref = use_pointer_for_field (var, NULL);
895 x = build_receiver_ref (var, by_ref, ctx);
896 }
897 else if (ctx->outer)
898 x = lookup_decl (var, ctx->outer);
899 else if (is_reference (var))
900 /* This can happen with orphaned constructs. If var is reference, it is
901 possible it is shared and as such valid. */
902 x = var;
903 else
904 gcc_unreachable ();
905
906 if (is_reference (var))
907 x = build_fold_indirect_ref (x);
908
909 return x;
910 }
911
912 /* Build tree nodes to access the field for VAR on the sender side. */
913
914 static tree
915 build_sender_ref (tree var, omp_context *ctx)
916 {
917 tree field = lookup_sfield (var, ctx);
918 return build3 (COMPONENT_REF, TREE_TYPE (field),
919 ctx->sender_decl, field, NULL);
920 }
921
922 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
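/* Added note (inferred from the uses below): MASK is a bitmask; bit 0
   requests a field in CTX->RECORD_TYPE (tracked in CTX->FIELD_MAP), bit 1
   a field in CTX->SRECORD_TYPE (tracked in CTX->SFIELD_MAP, see the
   omp_context comment above), and MASK == 3 installs the field in both. */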
923
924 static void
925 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
926 {
927 tree field, type, sfield = NULL_TREE;
928
929 gcc_assert ((mask & 1) == 0
930 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
931 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
932 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
933
934 type = TREE_TYPE (var);
935 if (by_ref)
936 type = build_pointer_type (type);
937 else if ((mask & 3) == 1 && is_reference (var))
938 type = TREE_TYPE (type);
939
940 field = build_decl (DECL_SOURCE_LOCATION (var),
941 FIELD_DECL, DECL_NAME (var), type);
942
943 /* Remember what variable this field was created for. This does have a
944 side effect of making dwarf2out ignore this member, so for helpful
945 debugging we clear it later in delete_omp_context. */
946 DECL_ABSTRACT_ORIGIN (field) = var;
947 if (type == TREE_TYPE (var))
948 {
949 DECL_ALIGN (field) = DECL_ALIGN (var);
950 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
951 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
952 }
953 else
954 DECL_ALIGN (field) = TYPE_ALIGN (type);
955
956 if ((mask & 3) == 3)
957 {
958 insert_field_into_struct (ctx->record_type, field);
959 if (ctx->srecord_type)
960 {
961 sfield = build_decl (DECL_SOURCE_LOCATION (var),
962 FIELD_DECL, DECL_NAME (var), type);
963 DECL_ABSTRACT_ORIGIN (sfield) = var;
964 DECL_ALIGN (sfield) = DECL_ALIGN (field);
965 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
966 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
967 insert_field_into_struct (ctx->srecord_type, sfield);
968 }
969 }
970 else
971 {
972 if (ctx->srecord_type == NULL_TREE)
973 {
974 tree t;
975
976 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
977 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
978 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
979 {
980 sfield = build_decl (DECL_SOURCE_LOCATION (var),
981 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
982 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
983 insert_field_into_struct (ctx->srecord_type, sfield);
984 splay_tree_insert (ctx->sfield_map,
985 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
986 (splay_tree_value) sfield);
987 }
988 }
989 sfield = field;
990 insert_field_into_struct ((mask & 1) ? ctx->record_type
991 : ctx->srecord_type, field);
992 }
993
994 if (mask & 1)
995 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
996 (splay_tree_value) field);
997 if ((mask & 2) && ctx->sfield_map)
998 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
999 (splay_tree_value) sfield);
1000 }
1001
1002 static tree
1003 install_var_local (tree var, omp_context *ctx)
1004 {
1005 tree new_var = omp_copy_decl_1 (var, ctx);
1006 insert_decl_map (&ctx->cb, var, new_var);
1007 return new_var;
1008 }
1009
1010 /* Adjust the replacement for DECL in CTX for the new context. This means
1011 copying the DECL_VALUE_EXPR, and fixing up the type. */
1012
1013 static void
1014 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1015 {
1016 tree new_decl, size;
1017
1018 new_decl = lookup_decl (decl, ctx);
1019
1020 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1021
1022 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1023 && DECL_HAS_VALUE_EXPR_P (decl))
1024 {
1025 tree ve = DECL_VALUE_EXPR (decl);
1026 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1027 SET_DECL_VALUE_EXPR (new_decl, ve);
1028 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1029 }
1030
1031 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1032 {
1033 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1034 if (size == error_mark_node)
1035 size = TYPE_SIZE (TREE_TYPE (new_decl));
1036 DECL_SIZE (new_decl) = size;
1037
1038 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1039 if (size == error_mark_node)
1040 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1041 DECL_SIZE_UNIT (new_decl) = size;
1042 }
1043 }
1044
1045 /* The callback for remap_decl. Search all containing contexts for a
1046 mapping of the variable; this avoids having to duplicate the splay
1047 tree ahead of time. We know a mapping doesn't already exist in the
1048 given context. Create new mappings to implement default semantics. */
1049
1050 static tree
1051 omp_copy_decl (tree var, copy_body_data *cb)
1052 {
1053 omp_context *ctx = (omp_context *) cb;
1054 tree new_var;
1055
1056 if (TREE_CODE (var) == LABEL_DECL)
1057 {
1058 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1059 DECL_CONTEXT (new_var) = current_function_decl;
1060 insert_decl_map (&ctx->cb, var, new_var);
1061 return new_var;
1062 }
1063
1064 while (!is_taskreg_ctx (ctx))
1065 {
1066 ctx = ctx->outer;
1067 if (ctx == NULL)
1068 return var;
1069 new_var = maybe_lookup_decl (var, ctx);
1070 if (new_var)
1071 return new_var;
1072 }
1073
1074 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1075 return var;
1076
1077 return error_mark_node;
1078 }
1079
1080
1081 /* Return the parallel region associated with STMT. */
1082
1083 /* Debugging dumps for parallel regions. */
1084 void dump_omp_region (FILE *, struct omp_region *, int);
1085 void debug_omp_region (struct omp_region *);
1086 void debug_all_omp_regions (void);
1087
1088 /* Dump the parallel region tree rooted at REGION. */
1089
1090 void
1091 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1092 {
1093 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1094 gimple_code_name[region->type]);
1095
1096 if (region->inner)
1097 dump_omp_region (file, region->inner, indent + 4);
1098
1099 if (region->cont)
1100 {
1101 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1102 region->cont->index);
1103 }
1104
1105 if (region->exit)
1106 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1107 region->exit->index);
1108 else
1109 fprintf (file, "%*s[no exit marker]\n", indent, "");
1110
1111 if (region->next)
1112 dump_omp_region (file, region->next, indent);
1113 }
1114
1115 void
1116 debug_omp_region (struct omp_region *region)
1117 {
1118 dump_omp_region (stderr, region, 0);
1119 }
1120
1121 void
1122 debug_all_omp_regions (void)
1123 {
1124 dump_omp_region (stderr, root_omp_region, 0);
1125 }
1126
1127
1128 /* Create a new parallel region starting at STMT inside region PARENT. */
1129
1130 struct omp_region *
1131 new_omp_region (basic_block bb, enum gimple_code type,
1132 struct omp_region *parent)
1133 {
1134 struct omp_region *region = XCNEW (struct omp_region);
1135
1136 region->outer = parent;
1137 region->entry = bb;
1138 region->type = type;
1139
1140 if (parent)
1141 {
1142 /* This is a nested region. Add it to the list of inner
1143 regions in PARENT. */
1144 region->next = parent->inner;
1145 parent->inner = region;
1146 }
1147 else
1148 {
1149 /* This is a toplevel region. Add it to the list of toplevel
1150 regions in ROOT_OMP_REGION. */
1151 region->next = root_omp_region;
1152 root_omp_region = region;
1153 }
1154
1155 return region;
1156 }
1157
1158 /* Release the memory associated with the region tree rooted at REGION. */
1159
1160 static void
1161 free_omp_region_1 (struct omp_region *region)
1162 {
1163 struct omp_region *i, *n;
1164
1165 for (i = region->inner; i ; i = n)
1166 {
1167 n = i->next;
1168 free_omp_region_1 (i);
1169 }
1170
1171 free (region);
1172 }
1173
1174 /* Release the memory for the entire omp region tree. */
1175
1176 void
1177 free_omp_regions (void)
1178 {
1179 struct omp_region *r, *n;
1180 for (r = root_omp_region; r ; r = n)
1181 {
1182 n = r->next;
1183 free_omp_region_1 (r);
1184 }
1185 root_omp_region = NULL;
1186 }
1187
1188
1189 /* Create a new context, with OUTER_CTX being the surrounding context. */
1190
1191 static omp_context *
1192 new_omp_context (gimple stmt, omp_context *outer_ctx)
1193 {
1194 omp_context *ctx = XCNEW (omp_context);
1195
1196 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1197 (splay_tree_value) ctx);
1198 ctx->stmt = stmt;
1199
1200 if (outer_ctx)
1201 {
1202 ctx->outer = outer_ctx;
1203 ctx->cb = outer_ctx->cb;
1204 ctx->cb.block = NULL;
1205 ctx->depth = outer_ctx->depth + 1;
1206 }
1207 else
1208 {
1209 ctx->cb.src_fn = current_function_decl;
1210 ctx->cb.dst_fn = current_function_decl;
1211 ctx->cb.src_node = cgraph_node (current_function_decl);
1212 ctx->cb.dst_node = ctx->cb.src_node;
1213 ctx->cb.src_cfun = cfun;
1214 ctx->cb.copy_decl = omp_copy_decl;
1215 ctx->cb.eh_lp_nr = 0;
1216 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1217 ctx->depth = 1;
1218 }
1219
1220 ctx->cb.decl_map = pointer_map_create ();
1221
1222 return ctx;
1223 }
1224
1225 static gimple_seq maybe_catch_exception (gimple_seq);
1226
1227 /* Finalize task copyfn. */
1228
1229 static void
1230 finalize_task_copyfn (gimple task_stmt)
1231 {
1232 struct function *child_cfun;
1233 tree child_fn, old_fn;
1234 gimple_seq seq, new_seq;
1235 gimple bind;
1236
1237 child_fn = gimple_omp_task_copy_fn (task_stmt);
1238 if (child_fn == NULL_TREE)
1239 return;
1240
1241 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1242
1243 /* Inform the callgraph about the new function. */
1244 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1245 = cfun->curr_properties;
1246
1247 old_fn = current_function_decl;
1248 push_cfun (child_cfun);
1249 current_function_decl = child_fn;
1250 bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1251 seq = gimple_seq_alloc ();
1252 gimple_seq_add_stmt (&seq, bind);
1253 new_seq = maybe_catch_exception (seq);
1254 if (new_seq != seq)
1255 {
1256 bind = gimple_build_bind (NULL, new_seq, NULL);
1257 seq = gimple_seq_alloc ();
1258 gimple_seq_add_stmt (&seq, bind);
1259 }
1260 gimple_set_body (child_fn, seq);
1261 pop_cfun ();
1262 current_function_decl = old_fn;
1263
1264 cgraph_add_new_function (child_fn, false);
1265 }
1266
1267 /* Destroy an omp_context data structure. Called through the splay tree
1268 value delete callback. */
1269
1270 static void
1271 delete_omp_context (splay_tree_value value)
1272 {
1273 omp_context *ctx = (omp_context *) value;
1274
1275 pointer_map_destroy (ctx->cb.decl_map);
1276
1277 if (ctx->field_map)
1278 splay_tree_delete (ctx->field_map);
1279 if (ctx->sfield_map)
1280 splay_tree_delete (ctx->sfield_map);
1281
1282 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it here
1283 so that it does not produce corrupt debug information. */
1284 if (ctx->record_type)
1285 {
1286 tree t;
1287 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1288 DECL_ABSTRACT_ORIGIN (t) = NULL;
1289 }
1290 if (ctx->srecord_type)
1291 {
1292 tree t;
1293 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
1294 DECL_ABSTRACT_ORIGIN (t) = NULL;
1295 }
1296
1297 if (is_task_ctx (ctx))
1298 finalize_task_copyfn (ctx->stmt);
1299
1300 XDELETE (ctx);
1301 }
1302
1303 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1304 context. */
1305
1306 static void
1307 fixup_child_record_type (omp_context *ctx)
1308 {
1309 tree f, type = ctx->record_type;
1310
1311 /* ??? It isn't sufficient to just call remap_type here, because
1312 variably_modified_type_p doesn't work the way we expect for
1313 record types. Testing each field for whether it needs remapping
1314 and creating a new record by hand works, however. */
1315 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
1316 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1317 break;
1318 if (f)
1319 {
1320 tree name, new_fields = NULL;
1321
1322 type = lang_hooks.types.make_type (RECORD_TYPE);
1323 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1324 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1325 TYPE_DECL, name, type);
1326 TYPE_NAME (type) = name;
1327
1328 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
1329 {
1330 tree new_f = copy_node (f);
1331 DECL_CONTEXT (new_f) = type;
1332 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1333 TREE_CHAIN (new_f) = new_fields;
1334 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1335 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1336 &ctx->cb, NULL);
1337 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1338 &ctx->cb, NULL);
1339 new_fields = new_f;
1340
1341 /* Arrange to be able to look up the receiver field
1342 given the sender field. */
1343 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1344 (splay_tree_value) new_f);
1345 }
1346 TYPE_FIELDS (type) = nreverse (new_fields);
1347 layout_type (type);
1348 }
1349
1350 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1351 }
1352
1353 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1354 specified by CLAUSES. */
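/* For example (illustrative): given automatic ints a and b and

     #pragma omp parallel shared (a) firstprivate (b)

   both variables get a field in the communication record via
   install_var_field and a replacement decl in the child function via
   install_var_local, while global shared variables are skipped and
   referenced directly on the receiver side. */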
1355
1356 static void
1357 scan_sharing_clauses (tree clauses, omp_context *ctx)
1358 {
1359 tree c, decl;
1360 bool scan_array_reductions = false;
1361
1362 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1363 {
1364 bool by_ref;
1365
1366 switch (OMP_CLAUSE_CODE (c))
1367 {
1368 case OMP_CLAUSE_PRIVATE:
1369 decl = OMP_CLAUSE_DECL (c);
1370 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1371 goto do_private;
1372 else if (!is_variable_sized (decl))
1373 install_var_local (decl, ctx);
1374 break;
1375
1376 case OMP_CLAUSE_SHARED:
1377 gcc_assert (is_taskreg_ctx (ctx));
1378 decl = OMP_CLAUSE_DECL (c);
1379 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1380 || !is_variable_sized (decl));
1381 /* Global variables don't need to be copied,
1382 the receiver side will use them directly. */
1383 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1384 break;
1385 by_ref = use_pointer_for_field (decl, ctx);
1386 if (! TREE_READONLY (decl)
1387 || TREE_ADDRESSABLE (decl)
1388 || by_ref
1389 || is_reference (decl))
1390 {
1391 install_var_field (decl, by_ref, 3, ctx);
1392 install_var_local (decl, ctx);
1393 break;
1394 }
1395 /* We don't need to copy const scalar vars back. */
1396 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1397 goto do_private;
1398
1399 case OMP_CLAUSE_LASTPRIVATE:
1400 /* Let the corresponding firstprivate clause create
1401 the variable. */
1402 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1403 break;
1404 /* FALLTHRU */
1405
1406 case OMP_CLAUSE_FIRSTPRIVATE:
1407 case OMP_CLAUSE_REDUCTION:
1408 decl = OMP_CLAUSE_DECL (c);
1409 do_private:
1410 if (is_variable_sized (decl))
1411 {
1412 if (is_task_ctx (ctx))
1413 install_var_field (decl, false, 1, ctx);
1414 break;
1415 }
1416 else if (is_taskreg_ctx (ctx))
1417 {
1418 bool global
1419 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1420 by_ref = use_pointer_for_field (decl, NULL);
1421
1422 if (is_task_ctx (ctx)
1423 && (global || by_ref || is_reference (decl)))
1424 {
1425 install_var_field (decl, false, 1, ctx);
1426 if (!global)
1427 install_var_field (decl, by_ref, 2, ctx);
1428 }
1429 else if (!global)
1430 install_var_field (decl, by_ref, 3, ctx);
1431 }
1432 install_var_local (decl, ctx);
1433 break;
1434
1435 case OMP_CLAUSE_COPYPRIVATE:
1436 if (ctx->outer)
1437 scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
1438 /* FALLTHRU */
1439
1440 case OMP_CLAUSE_COPYIN:
1441 decl = OMP_CLAUSE_DECL (c);
1442 by_ref = use_pointer_for_field (decl, NULL);
1443 install_var_field (decl, by_ref, 3, ctx);
1444 break;
1445
1446 case OMP_CLAUSE_DEFAULT:
1447 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1448 break;
1449
1450 case OMP_CLAUSE_IF:
1451 case OMP_CLAUSE_NUM_THREADS:
1452 case OMP_CLAUSE_SCHEDULE:
1453 if (ctx->outer)
1454 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1455 break;
1456
1457 case OMP_CLAUSE_NOWAIT:
1458 case OMP_CLAUSE_ORDERED:
1459 case OMP_CLAUSE_COLLAPSE:
1460 case OMP_CLAUSE_UNTIED:
1461 break;
1462
1463 default:
1464 gcc_unreachable ();
1465 }
1466 }
1467
1468 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1469 {
1470 switch (OMP_CLAUSE_CODE (c))
1471 {
1472 case OMP_CLAUSE_LASTPRIVATE:
1473 /* Let the corresponding firstprivate clause create
1474 the variable. */
1475 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1476 scan_array_reductions = true;
1477 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1478 break;
1479 /* FALLTHRU */
1480
1481 case OMP_CLAUSE_PRIVATE:
1482 case OMP_CLAUSE_FIRSTPRIVATE:
1483 case OMP_CLAUSE_REDUCTION:
1484 decl = OMP_CLAUSE_DECL (c);
1485 if (is_variable_sized (decl))
1486 install_var_local (decl, ctx);
1487 fixup_remapped_decl (decl, ctx,
1488 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1489 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1490 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1491 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1492 scan_array_reductions = true;
1493 break;
1494
1495 case OMP_CLAUSE_SHARED:
1496 decl = OMP_CLAUSE_DECL (c);
1497 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1498 fixup_remapped_decl (decl, ctx, false);
1499 break;
1500
1501 case OMP_CLAUSE_COPYPRIVATE:
1502 case OMP_CLAUSE_COPYIN:
1503 case OMP_CLAUSE_DEFAULT:
1504 case OMP_CLAUSE_IF:
1505 case OMP_CLAUSE_NUM_THREADS:
1506 case OMP_CLAUSE_SCHEDULE:
1507 case OMP_CLAUSE_NOWAIT:
1508 case OMP_CLAUSE_ORDERED:
1509 case OMP_CLAUSE_COLLAPSE:
1510 case OMP_CLAUSE_UNTIED:
1511 break;
1512
1513 default:
1514 gcc_unreachable ();
1515 }
1516 }
1517
1518 if (scan_array_reductions)
1519 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1520 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1521 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1522 {
1523 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1524 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1525 }
1526 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1527 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1528 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1529 }
1530
1531 /* Create a new name for the omp child function. Returns an identifier. */
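/* For example (illustrative): within a function foo this typically yields
   names such as foo.omp_fn.0, foo.omp_fn.1, ... for outlined bodies and
   foo.omp_cpyfn.N for task copy functions; on targets where neither '.'
   nor '$' is valid in labels the leading '_' of the suffix is kept. */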
1532
1533 static GTY(()) unsigned int tmp_ompfn_id_num;
1534
1535 static tree
1536 create_omp_child_function_name (bool task_copy)
1537 {
1538 tree name = DECL_ASSEMBLER_NAME (current_function_decl);
1539 size_t len = IDENTIFIER_LENGTH (name);
1540 char *tmp_name, *prefix;
1541 const char *suffix;
1542
1543 suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
1544 prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
1545 memcpy (prefix, IDENTIFIER_POINTER (name), len);
1546 strcpy (prefix + len, suffix);
1547 #ifndef NO_DOT_IN_LABEL
1548 prefix[len] = '.';
1549 #elif !defined NO_DOLLAR_IN_LABEL
1550 prefix[len] = '$';
1551 #endif
1552 ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
1553 return get_identifier (tmp_name);
1554 }
1555
1556 /* Build a decl for the omp child function. It will not contain a body
1557 yet, just the bare decl. */
1558
1559 static void
1560 create_omp_child_function (omp_context *ctx, bool task_copy)
1561 {
1562 tree decl, type, name, t;
1563
1564 name = create_omp_child_function_name (task_copy);
1565 if (task_copy)
1566 type = build_function_type_list (void_type_node, ptr_type_node,
1567 ptr_type_node, NULL_TREE);
1568 else
1569 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1570
1571 decl = build_decl (gimple_location (ctx->stmt),
1572 FUNCTION_DECL, name, type);
1573
1574 if (!task_copy)
1575 ctx->cb.dst_fn = decl;
1576 else
1577 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1578
1579 TREE_STATIC (decl) = 1;
1580 TREE_USED (decl) = 1;
1581 DECL_ARTIFICIAL (decl) = 1;
1582 DECL_IGNORED_P (decl) = 0;
1583 TREE_PUBLIC (decl) = 0;
1584 DECL_UNINLINABLE (decl) = 1;
1585 DECL_EXTERNAL (decl) = 0;
1586 DECL_CONTEXT (decl) = NULL_TREE;
1587 DECL_INITIAL (decl) = make_node (BLOCK);
1588
1589 t = build_decl (DECL_SOURCE_LOCATION (decl),
1590 RESULT_DECL, NULL_TREE, void_type_node);
1591 DECL_ARTIFICIAL (t) = 1;
1592 DECL_IGNORED_P (t) = 1;
1593 DECL_CONTEXT (t) = decl;
1594 DECL_RESULT (decl) = t;
1595
1596 t = build_decl (DECL_SOURCE_LOCATION (decl),
1597 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1598 DECL_ARTIFICIAL (t) = 1;
1599 DECL_ARG_TYPE (t) = ptr_type_node;
1600 DECL_CONTEXT (t) = current_function_decl;
1601 TREE_USED (t) = 1;
1602 DECL_ARGUMENTS (decl) = t;
1603 if (!task_copy)
1604 ctx->receiver_decl = t;
1605 else
1606 {
1607 t = build_decl (DECL_SOURCE_LOCATION (decl),
1608 PARM_DECL, get_identifier (".omp_data_o"),
1609 ptr_type_node);
1610 DECL_ARTIFICIAL (t) = 1;
1611 DECL_ARG_TYPE (t) = ptr_type_node;
1612 DECL_CONTEXT (t) = current_function_decl;
1613 TREE_USED (t) = 1;
1614 TREE_ADDRESSABLE (t) = 1;
1615 TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
1616 DECL_ARGUMENTS (decl) = t;
1617 }
1618
1619 /* Allocate memory for the function structure. The call to
1620 allocate_struct_function clobbers CFUN, so we need to restore
1621 it afterward. */
1622 push_struct_function (decl);
1623 cfun->function_end_locus = gimple_location (ctx->stmt);
1624 pop_cfun ();
1625 }
1626
1627
1628 /* Scan an OpenMP parallel directive. */
1629
1630 static void
1631 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1632 {
1633 omp_context *ctx;
1634 tree name;
1635 gimple stmt = gsi_stmt (*gsi);
1636
1637 /* Ignore parallel directives with empty bodies, unless there
1638 are copyin clauses. */
1639 if (optimize > 0
1640 && empty_body_p (gimple_omp_body (stmt))
1641 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1642 OMP_CLAUSE_COPYIN) == NULL)
1643 {
1644 gsi_replace (gsi, gimple_build_nop (), false);
1645 return;
1646 }
1647
1648 ctx = new_omp_context (stmt, outer_ctx);
1649 if (taskreg_nesting_level > 1)
1650 ctx->is_nested = true;
1651 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1652 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1653 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1654 name = create_tmp_var_name (".omp_data_s");
1655 name = build_decl (gimple_location (stmt),
1656 TYPE_DECL, name, ctx->record_type);
1657 TYPE_NAME (ctx->record_type) = name;
1658 create_omp_child_function (ctx, false);
1659 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1660
1661 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1662 scan_omp (gimple_omp_body (stmt), ctx);
1663
1664 if (TYPE_FIELDS (ctx->record_type) == NULL)
1665 ctx->record_type = ctx->receiver_decl = NULL;
1666 else
1667 {
1668 layout_type (ctx->record_type);
1669 fixup_child_record_type (ctx);
1670 }
1671 }
1672
1673 /* Scan an OpenMP task directive. */
1674
1675 static void
1676 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1677 {
1678 omp_context *ctx;
1679 tree name, t;
1680 gimple stmt = gsi_stmt (*gsi);
1681 location_t loc = gimple_location (stmt);
1682
1683 /* Ignore task directives with empty bodies. */
1684 if (optimize > 0
1685 && empty_body_p (gimple_omp_body (stmt)))
1686 {
1687 gsi_replace (gsi, gimple_build_nop (), false);
1688 return;
1689 }
1690
1691 ctx = new_omp_context (stmt, outer_ctx);
1692 if (taskreg_nesting_level > 1)
1693 ctx->is_nested = true;
1694 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1695 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1696 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1697 name = create_tmp_var_name (".omp_data_s");
1698 name = build_decl (gimple_location (stmt),
1699 TYPE_DECL, name, ctx->record_type);
1700 TYPE_NAME (ctx->record_type) = name;
1701 create_omp_child_function (ctx, false);
1702 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1703
1704 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1705
1706 if (ctx->srecord_type)
1707 {
1708 name = create_tmp_var_name (".omp_data_a");
1709 name = build_decl (gimple_location (stmt),
1710 TYPE_DECL, name, ctx->srecord_type);
1711 TYPE_NAME (ctx->srecord_type) = name;
1712 create_omp_child_function (ctx, true);
1713 }
1714
1715 scan_omp (gimple_omp_body (stmt), ctx);
1716
1717 if (TYPE_FIELDS (ctx->record_type) == NULL)
1718 {
1719 ctx->record_type = ctx->receiver_decl = NULL;
1720 t = build_int_cst (long_integer_type_node, 0);
1721 gimple_omp_task_set_arg_size (stmt, t);
1722 t = build_int_cst (long_integer_type_node, 1);
1723 gimple_omp_task_set_arg_align (stmt, t);
1724 }
1725 else
1726 {
1727 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1728 /* Move VLA fields to the end. */
1729 p = &TYPE_FIELDS (ctx->record_type);
1730 while (*p)
1731 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1732 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1733 {
1734 *q = *p;
1735 *p = TREE_CHAIN (*p);
1736 TREE_CHAIN (*q) = NULL_TREE;
1737 q = &TREE_CHAIN (*q);
1738 }
1739 else
1740 p = &TREE_CHAIN (*p);
1741 *p = vla_fields;
1742 layout_type (ctx->record_type);
1743 fixup_child_record_type (ctx);
1744 if (ctx->srecord_type)
1745 layout_type (ctx->srecord_type);
1746 t = fold_convert_loc (loc, long_integer_type_node,
1747 TYPE_SIZE_UNIT (ctx->record_type));
1748 gimple_omp_task_set_arg_size (stmt, t);
1749 t = build_int_cst (long_integer_type_node,
1750 TYPE_ALIGN_UNIT (ctx->record_type));
1751 gimple_omp_task_set_arg_align (stmt, t);
1752 }
1753 }
1754
1755
1756 /* Scan an OpenMP loop directive. */
1757
1758 static void
1759 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1760 {
1761 omp_context *ctx;
1762 size_t i;
1763
1764 ctx = new_omp_context (stmt, outer_ctx);
1765
1766 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1767
1768 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1769 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1770 {
1771 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1772 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1773 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1774 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1775 }
1776 scan_omp (gimple_omp_body (stmt), ctx);
1777 }
1778
1779 /* Scan an OpenMP sections directive. */
1780
1781 static void
1782 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1783 {
1784 omp_context *ctx;
1785
1786 ctx = new_omp_context (stmt, outer_ctx);
1787 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1788 scan_omp (gimple_omp_body (stmt), ctx);
1789 }
1790
1791 /* Scan an OpenMP single directive. */
1792
1793 static void
1794 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1795 {
1796 omp_context *ctx;
1797 tree name;
1798
1799 ctx = new_omp_context (stmt, outer_ctx);
1800 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1801 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1802 name = create_tmp_var_name (".omp_copy_s");
1803 name = build_decl (gimple_location (stmt),
1804 TYPE_DECL, name, ctx->record_type);
1805 TYPE_NAME (ctx->record_type) = name;
1806
1807 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1808 scan_omp (gimple_omp_body (stmt), ctx);
1809
1810 if (TYPE_FIELDS (ctx->record_type) == NULL)
1811 ctx->record_type = NULL;
1812 else
1813 layout_type (ctx->record_type);
1814 }
1815
1816
1817 /* Check OpenMP nesting restrictions. */
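/* For example (illustrative): in

     #pragma omp parallel
     {
       #pragma omp for
       for (i = 0; i < n; i++)
         {
           #pragma omp single
             f (i);
         }
     }

   the single construct is closely nested inside the loop's work-sharing
   region, so the GIMPLE_OMP_SINGLE case below emits the "work-sharing
   region may not be closely nested ..." warning. */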
1818 static void
1819 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1820 {
1821 switch (gimple_code (stmt))
1822 {
1823 case GIMPLE_OMP_FOR:
1824 case GIMPLE_OMP_SECTIONS:
1825 case GIMPLE_OMP_SINGLE:
1826 case GIMPLE_CALL:
1827 for (; ctx != NULL; ctx = ctx->outer)
1828 switch (gimple_code (ctx->stmt))
1829 {
1830 case GIMPLE_OMP_FOR:
1831 case GIMPLE_OMP_SECTIONS:
1832 case GIMPLE_OMP_SINGLE:
1833 case GIMPLE_OMP_ORDERED:
1834 case GIMPLE_OMP_MASTER:
1835 case GIMPLE_OMP_TASK:
1836 if (is_gimple_call (stmt))
1837 {
1838 warning (0, "barrier region may not be closely nested inside "
1839 "of work-sharing, critical, ordered, master or "
1840 "explicit task region");
1841 return;
1842 }
1843 warning (0, "work-sharing region may not be closely nested inside "
1844 "of work-sharing, critical, ordered, master or explicit "
1845 "task region");
1846 return;
1847 case GIMPLE_OMP_PARALLEL:
1848 return;
1849 default:
1850 break;
1851 }
1852 break;
1853 case GIMPLE_OMP_MASTER:
1854 for (; ctx != NULL; ctx = ctx->outer)
1855 switch (gimple_code (ctx->stmt))
1856 {
1857 case GIMPLE_OMP_FOR:
1858 case GIMPLE_OMP_SECTIONS:
1859 case GIMPLE_OMP_SINGLE:
1860 case GIMPLE_OMP_TASK:
1861 warning (0, "master region may not be closely nested inside "
1862 "of work-sharing or explicit task region");
1863 return;
1864 case GIMPLE_OMP_PARALLEL:
1865 return;
1866 default:
1867 break;
1868 }
1869 break;
1870 case GIMPLE_OMP_ORDERED:
1871 for (; ctx != NULL; ctx = ctx->outer)
1872 switch (gimple_code (ctx->stmt))
1873 {
1874 case GIMPLE_OMP_CRITICAL:
1875 case GIMPLE_OMP_TASK:
1876 warning (0, "ordered region may not be closely nested inside "
1877 "of critical or explicit task region");
1878 return;
1879 case GIMPLE_OMP_FOR:
1880 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1881 OMP_CLAUSE_ORDERED) == NULL)
1882 warning (0, "ordered region must be closely nested inside "
1883 "a loop region with an ordered clause");
1884 return;
1885 case GIMPLE_OMP_PARALLEL:
1886 return;
1887 default:
1888 break;
1889 }
1890 break;
1891 case GIMPLE_OMP_CRITICAL:
1892 for (; ctx != NULL; ctx = ctx->outer)
1893 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1894 && (gimple_omp_critical_name (stmt)
1895 == gimple_omp_critical_name (ctx->stmt)))
1896 {
1897 warning (0, "critical region may not be nested inside a critical "
1898 "region with the same name");
1899 return;
1900 }
1901 break;
1902 default:
1903 break;
1904 }
1905 }
1906
1907
1908 /* Helper function for scan_omp.
1909
1910 Callback for walk_tree or operators in walk_gimple_stmt used to
1911 scan for OpenMP directives in TP. */
1912
1913 static tree
1914 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1915 {
1916 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1917 omp_context *ctx = (omp_context *) wi->info;
1918 tree t = *tp;
1919
1920 switch (TREE_CODE (t))
1921 {
1922 case VAR_DECL:
1923 case PARM_DECL:
1924 case LABEL_DECL:
1925 case RESULT_DECL:
1926 if (ctx)
1927 *tp = remap_decl (t, &ctx->cb);
1928 break;
1929
1930 default:
1931 if (ctx && TYPE_P (t))
1932 *tp = remap_type (t, &ctx->cb);
1933 else if (!DECL_P (t))
1934 {
1935 *walk_subtrees = 1;
1936 if (ctx)
1937 TREE_TYPE (t) = remap_type (TREE_TYPE (t), &ctx->cb);
1938 }
1939 break;
1940 }
1941
1942 return NULL_TREE;
1943 }
1944
1945
1946 /* Helper function for scan_omp.
1947
1948 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1949 the current statement in GSI. */
1950
1951 static tree
1952 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1953 struct walk_stmt_info *wi)
1954 {
1955 gimple stmt = gsi_stmt (*gsi);
1956 omp_context *ctx = (omp_context *) wi->info;
1957
1958 if (gimple_has_location (stmt))
1959 input_location = gimple_location (stmt);
1960
1961 /* Check the OpenMP nesting restrictions. */
1962 if (ctx != NULL)
1963 {
1964 if (is_gimple_omp (stmt))
1965 check_omp_nesting_restrictions (stmt, ctx);
1966 else if (is_gimple_call (stmt))
1967 {
1968 tree fndecl = gimple_call_fndecl (stmt);
1969 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1970 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1971 check_omp_nesting_restrictions (stmt, ctx);
1972 }
1973 }
1974
1975 *handled_ops_p = true;
1976
1977 switch (gimple_code (stmt))
1978 {
1979 case GIMPLE_OMP_PARALLEL:
1980 taskreg_nesting_level++;
1981 scan_omp_parallel (gsi, ctx);
1982 taskreg_nesting_level--;
1983 break;
1984
1985 case GIMPLE_OMP_TASK:
1986 taskreg_nesting_level++;
1987 scan_omp_task (gsi, ctx);
1988 taskreg_nesting_level--;
1989 break;
1990
1991 case GIMPLE_OMP_FOR:
1992 scan_omp_for (stmt, ctx);
1993 break;
1994
1995 case GIMPLE_OMP_SECTIONS:
1996 scan_omp_sections (stmt, ctx);
1997 break;
1998
1999 case GIMPLE_OMP_SINGLE:
2000 scan_omp_single (stmt, ctx);
2001 break;
2002
2003 case GIMPLE_OMP_SECTION:
2004 case GIMPLE_OMP_MASTER:
2005 case GIMPLE_OMP_ORDERED:
2006 case GIMPLE_OMP_CRITICAL:
2007 ctx = new_omp_context (stmt, ctx);
2008 scan_omp (gimple_omp_body (stmt), ctx);
2009 break;
2010
2011 case GIMPLE_BIND:
2012 {
2013 tree var;
2014
2015 *handled_ops_p = false;
2016 if (ctx)
2017 for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
2018 insert_decl_map (&ctx->cb, var, var);
2019 }
2020 break;
2021 default:
2022 *handled_ops_p = false;
2023 break;
2024 }
2025
2026 return NULL_TREE;
2027 }
2028
2029
2030 /* Scan all the statements starting at the current statement. CTX
2031 contains context information about the OpenMP directives and
2032 clauses found during the scan. */
2033
2034 static void
2035 scan_omp (gimple_seq body, omp_context *ctx)
2036 {
2037 location_t saved_location;
2038 struct walk_stmt_info wi;
2039
2040 memset (&wi, 0, sizeof (wi));
2041 wi.info = ctx;
2042 wi.want_locations = true;
2043
2044 saved_location = input_location;
2045 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2046 input_location = saved_location;
2047 }
2048 \f
2049 /* Re-gimplification and code generation routines. */
2050
2051 /* Build a call to GOMP_barrier. */
2052
2053 static tree
2054 build_omp_barrier (void)
2055 {
2056 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2057 }
2058
2059 /* If a context was created for STMT when it was scanned, return it. */
2060
2061 static omp_context *
2062 maybe_lookup_ctx (gimple stmt)
2063 {
2064 splay_tree_node n;
2065 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2066 return n ? (omp_context *) n->value : NULL;
2067 }
2068
2069
2070 /* Find the mapping for DECL in CTX or the immediately enclosing
2071 context that has a mapping for DECL.
2072
2073 If CTX is a nested parallel directive, we may have to use the decl
2074 mappings created in CTX's parent context. Suppose that we have the
2075    following parallel nesting (variable UIDs shown for clarity):
2076
2077 iD.1562 = 0;
2078 #omp parallel shared(iD.1562) -> outer parallel
2079 iD.1562 = iD.1562 + 1;
2080
2081 #omp parallel shared (iD.1562) -> inner parallel
2082 iD.1562 = iD.1562 - 1;
2083
2084 Each parallel structure will create a distinct .omp_data_s structure
2085 for copying iD.1562 in/out of the directive:
2086
2087 outer parallel .omp_data_s.1.i -> iD.1562
2088 inner parallel .omp_data_s.2.i -> iD.1562
2089
2090 A shared variable mapping will produce a copy-out operation before
2091 the parallel directive and a copy-in operation after it. So, in
2092 this case we would have:
2093
2094 iD.1562 = 0;
2095 .omp_data_o.1.i = iD.1562;
2096 #omp parallel shared(iD.1562) -> outer parallel
2097 .omp_data_i.1 = &.omp_data_o.1
2098 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2099
2100 .omp_data_o.2.i = iD.1562; -> **
2101 #omp parallel shared(iD.1562) -> inner parallel
2102 .omp_data_i.2 = &.omp_data_o.2
2103 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2104
2105
2106 ** This is a problem. The symbol iD.1562 cannot be referenced
2107 inside the body of the outer parallel region. But since we are
2108 emitting this copy operation while expanding the inner parallel
2109 directive, we need to access the CTX structure of the outer
2110 parallel directive to get the correct mapping:
2111
2112 .omp_data_o.2.i = .omp_data_i.1->i
2113
2114 Since there may be other workshare or parallel directives enclosing
2115 the parallel directive, it may be necessary to walk up the context
2116 parent chain. This is not a problem in general because nested
2117 parallelism happens only rarely. */
2118
2119 static tree
2120 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2121 {
2122 tree t;
2123 omp_context *up;
2124
2125 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2126 t = maybe_lookup_decl (decl, up);
2127
2128 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2129
2130 return t ? t : decl;
2131 }
2132
2133
2134 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2135 in outer contexts. */
2136
2137 static tree
2138 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2139 {
2140 tree t = NULL;
2141 omp_context *up;
2142
2143 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2144 t = maybe_lookup_decl (decl, up);
2145
2146 return t ? t : decl;
2147 }
2148
2149
2150 /* Construct the initialization value for reduction CLAUSE. */
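/* For illustration (a summary of the cases handled below, not an
   exhaustive table): the private copy created for reduction(+:x),
   reduction(|:x) or reduction(^:x) starts at 0, for reduction(*:x) or
   reduction(&&:x) at 1, for reduction(&:x) at ~0, and for
   reduction(max:x) at the smallest value of TYPE (or -INF for
   floating-point types that honor infinities).  */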
2151
2152 tree
2153 omp_reduction_init (tree clause, tree type)
2154 {
2155 location_t loc = OMP_CLAUSE_LOCATION (clause);
2156 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2157 {
2158 case PLUS_EXPR:
2159 case MINUS_EXPR:
2160 case BIT_IOR_EXPR:
2161 case BIT_XOR_EXPR:
2162 case TRUTH_OR_EXPR:
2163 case TRUTH_ORIF_EXPR:
2164 case TRUTH_XOR_EXPR:
2165 case NE_EXPR:
2166 return fold_convert_loc (loc, type, integer_zero_node);
2167
2168 case MULT_EXPR:
2169 case TRUTH_AND_EXPR:
2170 case TRUTH_ANDIF_EXPR:
2171 case EQ_EXPR:
2172 return fold_convert_loc (loc, type, integer_one_node);
2173
2174 case BIT_AND_EXPR:
2175 return fold_convert_loc (loc, type, integer_minus_one_node);
2176
2177 case MAX_EXPR:
2178 if (SCALAR_FLOAT_TYPE_P (type))
2179 {
2180 REAL_VALUE_TYPE max, min;
2181 if (HONOR_INFINITIES (TYPE_MODE (type)))
2182 {
2183 real_inf (&max);
2184 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2185 }
2186 else
2187 real_maxval (&min, 1, TYPE_MODE (type));
2188 return build_real (type, min);
2189 }
2190 else
2191 {
2192 gcc_assert (INTEGRAL_TYPE_P (type));
2193 return TYPE_MIN_VALUE (type);
2194 }
2195
2196 case MIN_EXPR:
2197 if (SCALAR_FLOAT_TYPE_P (type))
2198 {
2199 REAL_VALUE_TYPE max;
2200 if (HONOR_INFINITIES (TYPE_MODE (type)))
2201 real_inf (&max);
2202 else
2203 real_maxval (&max, 0, TYPE_MODE (type));
2204 return build_real (type, max);
2205 }
2206 else
2207 {
2208 gcc_assert (INTEGRAL_TYPE_P (type));
2209 return TYPE_MAX_VALUE (type);
2210 }
2211
2212 default:
2213 gcc_unreachable ();
2214 }
2215 }
2216
2217 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2218 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2219 private variables. Initialization statements go in ILIST, while calls
2220 to destructors go in DLIST. */
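/* A minimal sketch of the receiver-side code this produces for a POD
   firstprivate variable (all names are illustrative, not taken from the
   sources):

     #pragma omp parallel firstprivate (a)

   roughly becomes, at the top of the outlined child function,

     a.1 = .omp_data_i->a;     initialize the private copy from the
                               value the parent sent over

   More involved clauses (reductions, C++ types with constructors,
   variable-sized types) emit calls to the language hooks and to
   __builtin_alloca instead of a plain assignment.  */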
2221
2222 static void
2223 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2224 omp_context *ctx)
2225 {
2226 gimple_stmt_iterator diter;
2227 tree c, dtor, copyin_seq, x, ptr;
2228 bool copyin_by_ref = false;
2229 bool lastprivate_firstprivate = false;
2230 int pass;
2231
2232 *dlist = gimple_seq_alloc ();
2233 diter = gsi_start (*dlist);
2234 copyin_seq = NULL;
2235
2236   /* Do all the fixed-sized types in the first pass, and the variable-sized
2237      types in the second pass.  This makes sure that the scalar arguments to
2238      the variable-sized types are processed before we use them in the
2239      variable-sized operations.  */
2240 for (pass = 0; pass < 2; ++pass)
2241 {
2242 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2243 {
2244 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2245 tree var, new_var;
2246 bool by_ref;
2247 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2248
2249 switch (c_kind)
2250 {
2251 case OMP_CLAUSE_PRIVATE:
2252 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2253 continue;
2254 break;
2255 case OMP_CLAUSE_SHARED:
2256 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2257 {
2258 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2259 continue;
2260 }
2261 case OMP_CLAUSE_FIRSTPRIVATE:
2262 case OMP_CLAUSE_COPYIN:
2263 case OMP_CLAUSE_REDUCTION:
2264 break;
2265 case OMP_CLAUSE_LASTPRIVATE:
2266 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2267 {
2268 lastprivate_firstprivate = true;
2269 if (pass != 0)
2270 continue;
2271 }
2272 break;
2273 default:
2274 continue;
2275 }
2276
2277 new_var = var = OMP_CLAUSE_DECL (c);
2278 if (c_kind != OMP_CLAUSE_COPYIN)
2279 new_var = lookup_decl (var, ctx);
2280
2281 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2282 {
2283 if (pass != 0)
2284 continue;
2285 }
2286 else if (is_variable_sized (var))
2287 {
2288 /* For variable sized types, we need to allocate the
2289 actual storage here. Call alloca and store the
2290 result in the pointer decl that we created elsewhere. */
2291 if (pass == 0)
2292 continue;
2293
2294 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2295 {
2296 gimple stmt;
2297 tree tmp;
2298
2299 ptr = DECL_VALUE_EXPR (new_var);
2300 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2301 ptr = TREE_OPERAND (ptr, 0);
2302 gcc_assert (DECL_P (ptr));
2303 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2304
2305 /* void *tmp = __builtin_alloca */
2306 stmt
2307 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2308 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2309 gimple_add_tmp_var (tmp);
2310 gimple_call_set_lhs (stmt, tmp);
2311
2312 gimple_seq_add_stmt (ilist, stmt);
2313
2314 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2315 gimplify_assign (ptr, x, ilist);
2316 }
2317 }
2318 else if (is_reference (var))
2319 {
2320 /* For references that are being privatized for Fortran,
2321 allocate new backing storage for the new pointer
2322 		 variable.  This allows us to avoid having to change all
2323 		 the code that expects a pointer into code that expects
2324 		 a direct variable.  Note that this doesn't apply to
2325 C++, since reference types are disallowed in data
2326 sharing clauses there, except for NRV optimized
2327 return values. */
2328 if (pass == 0)
2329 continue;
2330
2331 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2332 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2333 {
2334 x = build_receiver_ref (var, false, ctx);
2335 x = build_fold_addr_expr_loc (clause_loc, x);
2336 }
2337 else if (TREE_CONSTANT (x))
2338 {
2339 const char *name = NULL;
2340 if (DECL_NAME (var))
2341 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2342
2343 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2344 name);
2345 gimple_add_tmp_var (x);
2346 TREE_ADDRESSABLE (x) = 1;
2347 x = build_fold_addr_expr_loc (clause_loc, x);
2348 }
2349 else
2350 {
2351 x = build_call_expr_loc (clause_loc,
2352 built_in_decls[BUILT_IN_ALLOCA], 1, x);
2353 }
2354
2355 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2356 gimplify_assign (new_var, x, ilist);
2357
2358 new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
2359 }
2360 else if (c_kind == OMP_CLAUSE_REDUCTION
2361 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2362 {
2363 if (pass == 0)
2364 continue;
2365 }
2366 else if (pass != 0)
2367 continue;
2368
2369 switch (OMP_CLAUSE_CODE (c))
2370 {
2371 case OMP_CLAUSE_SHARED:
2372 /* Shared global vars are just accessed directly. */
2373 if (is_global_var (new_var))
2374 break;
2375 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2376 needs to be delayed until after fixup_child_record_type so
2377 that we get the correct type during the dereference. */
2378 by_ref = use_pointer_for_field (var, ctx);
2379 x = build_receiver_ref (var, by_ref, ctx);
2380 SET_DECL_VALUE_EXPR (new_var, x);
2381 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2382
2383 /* ??? If VAR is not passed by reference, and the variable
2384 hasn't been initialized yet, then we'll get a warning for
2385 the store into the omp_data_s structure. Ideally, we'd be
2386 able to notice this and not store anything at all, but
2387 we're generating code too early. Suppress the warning. */
2388 if (!by_ref)
2389 TREE_NO_WARNING (var) = 1;
2390 break;
2391
2392 case OMP_CLAUSE_LASTPRIVATE:
2393 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2394 break;
2395 /* FALLTHRU */
2396
2397 case OMP_CLAUSE_PRIVATE:
2398 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2399 x = build_outer_var_ref (var, ctx);
2400 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2401 {
2402 if (is_task_ctx (ctx))
2403 x = build_receiver_ref (var, false, ctx);
2404 else
2405 x = build_outer_var_ref (var, ctx);
2406 }
2407 else
2408 x = NULL;
2409 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2410 if (x)
2411 gimplify_and_add (x, ilist);
2412 /* FALLTHRU */
2413
2414 do_dtor:
2415 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2416 if (x)
2417 {
2418 gimple_seq tseq = NULL;
2419
2420 dtor = x;
2421 gimplify_stmt (&dtor, &tseq);
2422 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2423 }
2424 break;
2425
2426 case OMP_CLAUSE_FIRSTPRIVATE:
2427 if (is_task_ctx (ctx))
2428 {
2429 if (is_reference (var) || is_variable_sized (var))
2430 goto do_dtor;
2431 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2432 ctx))
2433 || use_pointer_for_field (var, NULL))
2434 {
2435 x = build_receiver_ref (var, false, ctx);
2436 SET_DECL_VALUE_EXPR (new_var, x);
2437 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2438 goto do_dtor;
2439 }
2440 }
2441 x = build_outer_var_ref (var, ctx);
2442 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2443 gimplify_and_add (x, ilist);
2444 goto do_dtor;
2445 break;
2446
2447 case OMP_CLAUSE_COPYIN:
2448 by_ref = use_pointer_for_field (var, NULL);
2449 x = build_receiver_ref (var, by_ref, ctx);
2450 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2451 append_to_statement_list (x, &copyin_seq);
2452 copyin_by_ref |= by_ref;
2453 break;
2454
2455 case OMP_CLAUSE_REDUCTION:
2456 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2457 {
2458 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2459 x = build_outer_var_ref (var, ctx);
2460
2461 if (is_reference (var))
2462 x = build_fold_addr_expr_loc (clause_loc, x);
2463 SET_DECL_VALUE_EXPR (placeholder, x);
2464 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2465 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2466 gimple_seq_add_seq (ilist,
2467 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2468 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2469 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2470 }
2471 else
2472 {
2473 x = omp_reduction_init (c, TREE_TYPE (new_var));
2474 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2475 gimplify_assign (new_var, x, ilist);
2476 }
2477 break;
2478
2479 default:
2480 gcc_unreachable ();
2481 }
2482 }
2483 }
2484
2485   /* The copyin sequence is not to be executed by the main thread, since
2486      that would result in self-copies.  That might not be observable for
2487      scalars, but it certainly is for C++ operator=.  */
2488 if (copyin_seq)
2489 {
2490 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2491 x = build2 (NE_EXPR, boolean_type_node, x,
2492 build_int_cst (TREE_TYPE (x), 0));
2493 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2494 gimplify_and_add (x, ilist);
2495 }
2496
2497 /* If any copyin variable is passed by reference, we must ensure the
2498 master thread doesn't modify it before it is copied over in all
2499      threads.  Similarly, for variables in both firstprivate and
2500      lastprivate clauses, we need to ensure that the lastprivate copying
2501      happens after the firstprivate copying in all threads.  */
2502 if (copyin_by_ref || lastprivate_firstprivate)
2503 gimplify_and_add (build_omp_barrier (), ilist);
2504 }
2505
2506
2507 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2508 both parallel and workshare constructs. PREDICATE may be NULL if it's
2509 always true. */
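/* Illustrative shape of the emitted sequence (names invented for the
   example): when PREDICATE is present, the copy-back is guarded so that
   only the thread that ran the sequentially last iteration stores into
   the original variables:

     if (PREDICATE) goto lastpriv; else goto done;
   lastpriv:
     orig_x = priv_x;          one assignment (or assignment hook call)
                               per lastprivate variable
   done:
     ...
*/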
2510
2511 static void
2512 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2513 omp_context *ctx)
2514 {
2515 tree x, c, label = NULL;
2516 bool par_clauses = false;
2517
2518 /* Early exit if there are no lastprivate clauses. */
2519 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2520 if (clauses == NULL)
2521 {
2522       /* If this was a workshare directive, see if it had been combined
2523 	 with its parallel.  In that case, look for the clauses on the
2524 	 parallel statement itself.  */
2525 if (is_parallel_ctx (ctx))
2526 return;
2527
2528 ctx = ctx->outer;
2529 if (ctx == NULL || !is_parallel_ctx (ctx))
2530 return;
2531
2532 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2533 OMP_CLAUSE_LASTPRIVATE);
2534 if (clauses == NULL)
2535 return;
2536 par_clauses = true;
2537 }
2538
2539 if (predicate)
2540 {
2541 gimple stmt;
2542 tree label_true, arm1, arm2;
2543
2544 label = create_artificial_label (UNKNOWN_LOCATION);
2545 label_true = create_artificial_label (UNKNOWN_LOCATION);
2546 arm1 = TREE_OPERAND (predicate, 0);
2547 arm2 = TREE_OPERAND (predicate, 1);
2548 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2549 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2550 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2551 label_true, label);
2552 gimple_seq_add_stmt (stmt_list, stmt);
2553 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2554 }
2555
2556 for (c = clauses; c ;)
2557 {
2558 tree var, new_var;
2559 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2560
2561 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2562 {
2563 var = OMP_CLAUSE_DECL (c);
2564 new_var = lookup_decl (var, ctx);
2565
2566 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2567 {
2568 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2569 gimple_seq_add_seq (stmt_list,
2570 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2571 }
2572 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2573
2574 x = build_outer_var_ref (var, ctx);
2575 if (is_reference (var))
2576 new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
2577 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2578 gimplify_and_add (x, stmt_list);
2579 }
2580 c = OMP_CLAUSE_CHAIN (c);
2581 if (c == NULL && !par_clauses)
2582 {
2583 	  /* If this was a workshare directive, see if it had been combined
2584 	     with its parallel.  In that case, continue looking for the
2585 	     clauses on the parallel statement itself as well.  */
2586 if (is_parallel_ctx (ctx))
2587 break;
2588
2589 ctx = ctx->outer;
2590 if (ctx == NULL || !is_parallel_ctx (ctx))
2591 break;
2592
2593 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2594 OMP_CLAUSE_LASTPRIVATE);
2595 par_clauses = true;
2596 }
2597 }
2598
2599 if (label)
2600 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2601 }
2602
2603
2604 /* Generate code to implement the REDUCTION clauses. */
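/* Rough sketch of what is generated (illustrative only): with exactly
   one scalar reduction clause the merge is done with an atomic update,

     #pragma omp atomic
     orig = orig OP priv;

   otherwise all the merges are wrapped between GOMP_atomic_start ()
   and GOMP_atomic_end (), i.e. protected by a single runtime lock.  */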
2605
2606 static void
2607 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2608 {
2609 gimple_seq sub_seq = NULL;
2610 gimple stmt;
2611 tree x, c;
2612 int count = 0;
2613
2614   /* First see if there is exactly one reduction clause.  Use an OMP_ATOMIC
2615      update in that case; otherwise use a lock.  */
2616 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2617 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2618 {
2619 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2620 {
2621 /* Never use OMP_ATOMIC for array reductions. */
2622 count = -1;
2623 break;
2624 }
2625 count++;
2626 }
2627
2628 if (count == 0)
2629 return;
2630
2631 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2632 {
2633 tree var, ref, new_var;
2634 enum tree_code code;
2635 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2636
2637 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2638 continue;
2639
2640 var = OMP_CLAUSE_DECL (c);
2641 new_var = lookup_decl (var, ctx);
2642 if (is_reference (var))
2643 new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
2644 ref = build_outer_var_ref (var, ctx);
2645 code = OMP_CLAUSE_REDUCTION_CODE (c);
2646
2647 /* reduction(-:var) sums up the partial results, so it acts
2648 identically to reduction(+:var). */
2649 if (code == MINUS_EXPR)
2650 code = PLUS_EXPR;
2651
2652 if (count == 1)
2653 {
2654 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2655
2656 addr = save_expr (addr);
2657 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2658 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2659 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2660 gimplify_and_add (x, stmt_seqp);
2661 return;
2662 }
2663
2664 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2665 {
2666 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2667
2668 if (is_reference (var))
2669 ref = build_fold_addr_expr_loc (clause_loc, ref);
2670 SET_DECL_VALUE_EXPR (placeholder, ref);
2671 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2672 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2673 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2674 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2675 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2676 }
2677 else
2678 {
2679 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2680 ref = build_outer_var_ref (var, ctx);
2681 gimplify_assign (ref, x, &sub_seq);
2682 }
2683 }
2684
2685 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2686 gimple_seq_add_stmt (stmt_seqp, stmt);
2687
2688 gimple_seq_add_seq (stmt_seqp, sub_seq);
2689
2690 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2691 gimple_seq_add_stmt (stmt_seqp, stmt);
2692 }
2693
2694
2695 /* Generate code to implement the COPYPRIVATE clauses. */
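/* Illustrative sketch (names invented): for "#pragma omp single
   copyprivate (x)" the single thread that executed the region stores x
   (or its address, if it must be passed by reference) into the copy
   structure via SLIST, and every other thread then copies it back out
   of that structure via RLIST.  */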
2696
2697 static void
2698 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2699 omp_context *ctx)
2700 {
2701 tree c;
2702
2703 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2704 {
2705 tree var, ref, x;
2706 bool by_ref;
2707 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2708
2709 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2710 continue;
2711
2712 var = OMP_CLAUSE_DECL (c);
2713 by_ref = use_pointer_for_field (var, NULL);
2714
2715 ref = build_sender_ref (var, ctx);
2716 x = lookup_decl_in_outer_ctx (var, ctx);
2717 x = by_ref ? build_fold_addr_expr_loc (clause_loc, x) : x;
2718 gimplify_assign (ref, x, slist);
2719
2720 ref = build_receiver_ref (var, by_ref, ctx);
2721 if (is_reference (var))
2722 {
2723 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2724 var = build_fold_indirect_ref_loc (clause_loc, var);
2725 }
2726 x = lang_hooks.decls.omp_clause_assign_op (c, var, ref);
2727 gimplify_and_add (x, rlist);
2728 }
2729 }
2730
2731
2732 /* Generate code to implement the clauses FIRSTPRIVATE, COPYIN, LASTPRIVATE
2733    and REDUCTION from the sender (aka parent) side.  */
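/* A small illustration (invented names): for
   "#pragma omp parallel firstprivate (a) lastprivate (b)" the parent
   side emits, before the runtime call that starts the region,

     .omp_data_o.a = a;        the "do_in" direction

   and, after the region,

     b = .omp_data_o.b;        the "do_out" direction for lastprivate

   into ILIST and OLIST respectively.  */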
2734
2735 static void
2736 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2737 omp_context *ctx)
2738 {
2739 tree c;
2740
2741 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2742 {
2743 tree val, ref, x, var;
2744 bool by_ref, do_in = false, do_out = false;
2745 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2746
2747 switch (OMP_CLAUSE_CODE (c))
2748 {
2749 case OMP_CLAUSE_PRIVATE:
2750 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2751 break;
2752 continue;
2753 case OMP_CLAUSE_FIRSTPRIVATE:
2754 case OMP_CLAUSE_COPYIN:
2755 case OMP_CLAUSE_LASTPRIVATE:
2756 case OMP_CLAUSE_REDUCTION:
2757 break;
2758 default:
2759 continue;
2760 }
2761
2762 val = OMP_CLAUSE_DECL (c);
2763 var = lookup_decl_in_outer_ctx (val, ctx);
2764
2765 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2766 && is_global_var (var))
2767 continue;
2768 if (is_variable_sized (val))
2769 continue;
2770 by_ref = use_pointer_for_field (val, NULL);
2771
2772 switch (OMP_CLAUSE_CODE (c))
2773 {
2774 case OMP_CLAUSE_PRIVATE:
2775 case OMP_CLAUSE_FIRSTPRIVATE:
2776 case OMP_CLAUSE_COPYIN:
2777 do_in = true;
2778 break;
2779
2780 case OMP_CLAUSE_LASTPRIVATE:
2781 if (by_ref || is_reference (val))
2782 {
2783 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2784 continue;
2785 do_in = true;
2786 }
2787 else
2788 {
2789 do_out = true;
2790 if (lang_hooks.decls.omp_private_outer_ref (val))
2791 do_in = true;
2792 }
2793 break;
2794
2795 case OMP_CLAUSE_REDUCTION:
2796 do_in = true;
2797 do_out = !(by_ref || is_reference (val));
2798 break;
2799
2800 default:
2801 gcc_unreachable ();
2802 }
2803
2804 if (do_in)
2805 {
2806 ref = build_sender_ref (val, ctx);
2807 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2808 gimplify_assign (ref, x, ilist);
2809 if (is_task_ctx (ctx))
2810 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2811 }
2812
2813 if (do_out)
2814 {
2815 ref = build_sender_ref (val, ctx);
2816 gimplify_assign (var, ref, olist);
2817 }
2818 }
2819 }
2820
2821 /* Generate code to implement SHARED from the sender (aka parent)
2822 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2823 list things that got automatically shared. */
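/* Sketch (illustrative): for each shared field the parent stores either
   the value or the address into the sender structure before the fork,

     .omp_data_o.x = x;        or   .omp_data_o.x = &x;

   and, for by-value fields that may have been modified, copies the value
   back out after the join:

     x = .omp_data_o.x;
*/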
2824
2825 static void
2826 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2827 {
2828 tree var, ovar, nvar, f, x, record_type;
2829
2830 if (ctx->record_type == NULL)
2831 return;
2832
2833 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2834 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
2835 {
2836 ovar = DECL_ABSTRACT_ORIGIN (f);
2837 nvar = maybe_lookup_decl (ovar, ctx);
2838 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2839 continue;
2840
2841       /* If CTX is a nested parallel directive, find the immediately
2842 	 enclosing parallel or workshare construct that contains a
2843 	 mapping for OVAR.  */
2844 var = lookup_decl_in_outer_ctx (ovar, ctx);
2845
2846 if (use_pointer_for_field (ovar, ctx))
2847 {
2848 x = build_sender_ref (ovar, ctx);
2849 var = build_fold_addr_expr (var);
2850 gimplify_assign (x, var, ilist);
2851 }
2852 else
2853 {
2854 x = build_sender_ref (ovar, ctx);
2855 gimplify_assign (x, var, ilist);
2856
2857 if (!TREE_READONLY (var)
2858 /* We don't need to receive a new reference to a result
2859 or parm decl. In fact we may not store to it as we will
2860 invalidate any pending RSO and generate wrong gimple
2861 during inlining. */
2862 && !((TREE_CODE (var) == RESULT_DECL
2863 || TREE_CODE (var) == PARM_DECL)
2864 && DECL_BY_REFERENCE (var)))
2865 {
2866 x = build_sender_ref (ovar, ctx);
2867 gimplify_assign (var, x, olist);
2868 }
2869 }
2870 }
2871 }
2872
2873
2874 /* A convenience function to build an empty GIMPLE_COND with just the
2875 condition. */
2876
2877 static gimple
2878 gimple_build_cond_empty (tree cond)
2879 {
2880 enum tree_code pred_code;
2881 tree lhs, rhs;
2882
2883 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2884 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2885 }
2886
2887
2888 /* Build the function calls to GOMP_parallel_start etc. to actually
2889    generate the parallel operation.  REGION is the parallel region
2890    being expanded.  BB is the block in which to insert the code.  WS_ARGS
2891    will be set if this is a call to a combined parallel+workshare
2892    construct; it contains the list of additional arguments needed by
2893    the workshare construct.  */
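/* The emitted sequence is roughly (illustrative, ignoring the handling
   of the IF and NUM_THREADS clauses):

     __builtin_GOMP_parallel_start (child_fn, &.omp_data_o, nthreads);
     child_fn (&.omp_data_o);
     __builtin_GOMP_parallel_end ();

   where nthreads is 0 when the number of threads is left to the
   runtime.  */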
2894
2895 static void
2896 expand_parallel_call (struct omp_region *region, basic_block bb,
2897 gimple entry_stmt, tree ws_args)
2898 {
2899 tree t, t1, t2, val, cond, c, clauses;
2900 gimple_stmt_iterator gsi;
2901 gimple stmt;
2902 int start_ix;
2903 location_t clause_loc;
2904
2905 clauses = gimple_omp_parallel_clauses (entry_stmt);
2906
2907 /* Determine what flavor of GOMP_parallel_start we will be
2908 emitting. */
2909 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2910 if (is_combined_parallel (region))
2911 {
2912 switch (region->inner->type)
2913 {
2914 case GIMPLE_OMP_FOR:
2915 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2916 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2917 + (region->inner->sched_kind
2918 == OMP_CLAUSE_SCHEDULE_RUNTIME
2919 ? 3 : region->inner->sched_kind);
2920 break;
2921 case GIMPLE_OMP_SECTIONS:
2922 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2923 break;
2924 default:
2925 gcc_unreachable ();
2926 }
2927 }
2928
2929 /* By default, the value of NUM_THREADS is zero (selected at run time)
2930 and there is no conditional. */
2931 cond = NULL_TREE;
2932 val = build_int_cst (unsigned_type_node, 0);
2933
2934 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2935 if (c)
2936 cond = OMP_CLAUSE_IF_EXPR (c);
2937
2938 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2939 if (c)
2940 {
2941 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2942 clause_loc = OMP_CLAUSE_LOCATION (c);
2943 }
2944 else
2945 clause_loc = gimple_location (entry_stmt);
2946
2947 /* Ensure 'val' is of the correct type. */
2948 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2949
2950 /* If we found the clause 'if (cond)', build either
2951 (cond != 0) or (cond ? val : 1u). */
2952 if (cond)
2953 {
2954 gimple_stmt_iterator gsi;
2955
2956 cond = gimple_boolify (cond);
2957
2958 if (integer_zerop (val))
2959 val = fold_build2_loc (clause_loc,
2960 EQ_EXPR, unsigned_type_node, cond,
2961 build_int_cst (TREE_TYPE (cond), 0));
2962 else
2963 {
2964 basic_block cond_bb, then_bb, else_bb;
2965 edge e, e_then, e_else;
2966 tree tmp_then, tmp_else, tmp_join, tmp_var;
2967
2968 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2969 if (gimple_in_ssa_p (cfun))
2970 {
2971 tmp_then = make_ssa_name (tmp_var, NULL);
2972 tmp_else = make_ssa_name (tmp_var, NULL);
2973 tmp_join = make_ssa_name (tmp_var, NULL);
2974 }
2975 else
2976 {
2977 tmp_then = tmp_var;
2978 tmp_else = tmp_var;
2979 tmp_join = tmp_var;
2980 }
2981
2982 e = split_block (bb, NULL);
2983 cond_bb = e->src;
2984 bb = e->dest;
2985 remove_edge (e);
2986
2987 then_bb = create_empty_bb (cond_bb);
2988 else_bb = create_empty_bb (then_bb);
2989 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2990 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2991
2992 stmt = gimple_build_cond_empty (cond);
2993 gsi = gsi_start_bb (cond_bb);
2994 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2995
2996 gsi = gsi_start_bb (then_bb);
2997 stmt = gimple_build_assign (tmp_then, val);
2998 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
2999
3000 gsi = gsi_start_bb (else_bb);
3001 stmt = gimple_build_assign
3002 (tmp_else, build_int_cst (unsigned_type_node, 1));
3003 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3004
3005 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3006 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3007 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3008 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3009
3010 if (gimple_in_ssa_p (cfun))
3011 {
3012 gimple phi = create_phi_node (tmp_join, bb);
3013 SSA_NAME_DEF_STMT (tmp_join) = phi;
3014 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3015 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3016 }
3017
3018 val = tmp_join;
3019 }
3020
3021 gsi = gsi_start_bb (bb);
3022 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3023 false, GSI_CONTINUE_LINKING);
3024 }
3025
3026 gsi = gsi_last_bb (bb);
3027 t = gimple_omp_parallel_data_arg (entry_stmt);
3028 if (t == NULL)
3029 t1 = null_pointer_node;
3030 else
3031 t1 = build_fold_addr_expr (t);
3032 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3033
3034 if (ws_args)
3035 {
3036 tree args = tree_cons (NULL, t2,
3037 tree_cons (NULL, t1,
3038 tree_cons (NULL, val, ws_args)));
3039 t = build_function_call_expr (UNKNOWN_LOCATION,
3040 built_in_decls[start_ix], args);
3041 }
3042 else
3043 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
3044
3045 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3046 false, GSI_CONTINUE_LINKING);
3047
3048 t = gimple_omp_parallel_data_arg (entry_stmt);
3049 if (t == NULL)
3050 t = null_pointer_node;
3051 else
3052 t = build_fold_addr_expr (t);
3053 t = build_call_expr_loc (gimple_location (entry_stmt),
3054 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3055 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3056 false, GSI_CONTINUE_LINKING);
3057
3058 t = build_call_expr_loc (gimple_location (entry_stmt),
3059 built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3060 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3061 false, GSI_CONTINUE_LINKING);
3062 }
3063
3064
3065 /* Build the function call to GOMP_task to actually
3066    generate the task operation.  BB is the block in which to insert the code.  */
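/* Illustratively, the call built here has the shape

     __builtin_GOMP_task (child_fn, &.omp_data_o, cpyfn, arg_size,
                          arg_align, if_clause_cond, flags);

   with flags currently carrying only the "untied" bit.  */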
3067
3068 static void
3069 expand_task_call (basic_block bb, gimple entry_stmt)
3070 {
3071 tree t, t1, t2, t3, flags, cond, c, clauses;
3072 gimple_stmt_iterator gsi;
3073 location_t loc = gimple_location (entry_stmt);
3074
3075 clauses = gimple_omp_task_clauses (entry_stmt);
3076
3077 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3078 if (c)
3079 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3080 else
3081 cond = boolean_true_node;
3082
3083 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3084 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3085
3086 gsi = gsi_last_bb (bb);
3087 t = gimple_omp_task_data_arg (entry_stmt);
3088 if (t == NULL)
3089 t2 = null_pointer_node;
3090 else
3091 t2 = build_fold_addr_expr_loc (loc, t);
3092 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3093 t = gimple_omp_task_copy_fn (entry_stmt);
3094 if (t == NULL)
3095 t3 = null_pointer_node;
3096 else
3097 t3 = build_fold_addr_expr_loc (loc, t);
3098
3099 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3100 gimple_omp_task_arg_size (entry_stmt),
3101 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3102
3103 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3104 false, GSI_CONTINUE_LINKING);
3105 }
3106
3107
3108 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3109 catch handler and return it. This prevents programs from violating the
3110 structured block semantics with throws. */
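/* Conceptually (illustrative), BODY becomes

     try { BODY } catch (...) { MUST_NOT_THROW; }

   so that an exception escaping the structured block terminates via the
   language's cleanup hook (or __builtin_trap when there is none) instead
   of unwinding past the OpenMP region.  */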
3111
3112 static gimple_seq
3113 maybe_catch_exception (gimple_seq body)
3114 {
3115 gimple g;
3116 tree decl;
3117
3118 if (!flag_exceptions)
3119 return body;
3120
3121 if (lang_protect_cleanup_actions)
3122 decl = lang_protect_cleanup_actions ();
3123 else
3124 decl = built_in_decls[BUILT_IN_TRAP];
3125
3126 g = gimple_build_eh_must_not_throw (decl);
3127 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3128 GIMPLE_TRY_CATCH);
3129
3130 return gimple_seq_alloc_with_stmt (g);
3131 }
3132
3133 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
3134
3135 static tree
3136 list2chain (tree list)
3137 {
3138 tree t;
3139
3140 for (t = list; t; t = TREE_CHAIN (t))
3141 {
3142 tree var = TREE_VALUE (t);
3143 if (TREE_CHAIN (t))
3144 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
3145 else
3146 TREE_CHAIN (var) = NULL_TREE;
3147 }
3148
3149 return list ? TREE_VALUE (list) : NULL_TREE;
3150 }
3151
3152
3153 /* Remove barriers in REGION->EXIT's block. Note that this is only
3154 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3155    is an implicit barrier, any barrier that a workshare inside the
3156    GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL region
3157    can now be removed.  */
3158
3159 static void
3160 remove_exit_barrier (struct omp_region *region)
3161 {
3162 gimple_stmt_iterator gsi;
3163 basic_block exit_bb;
3164 edge_iterator ei;
3165 edge e;
3166 gimple stmt;
3167 int any_addressable_vars = -1;
3168
3169 exit_bb = region->exit;
3170
3171   /* If the parallel region doesn't return, we don't have a REGION->EXIT
3172      block at all.  */
3173 if (! exit_bb)
3174 return;
3175
3176 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3177 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3178 statements that can appear in between are extremely limited -- no
3179 memory operations at all. Here, we allow nothing at all, so the
3180 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3181 gsi = gsi_last_bb (exit_bb);
3182 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3183 gsi_prev (&gsi);
3184 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3185 return;
3186
3187 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3188 {
3189 gsi = gsi_last_bb (e->src);
3190 if (gsi_end_p (gsi))
3191 continue;
3192 stmt = gsi_stmt (gsi);
3193 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3194 && !gimple_omp_return_nowait_p (stmt))
3195 {
3196 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3197 in many cases. If there could be tasks queued, the barrier
3198 might be needed to let the tasks run before some local
3199 variable of the parallel that the task uses as shared
3200 	     runs out of scope.  The task can be spawned either
3201 	     from within the current function (this would be easy to check)
3202 	     or from some function it calls that gets passed the address
3203 	     of such a variable.  */
3204 if (any_addressable_vars < 0)
3205 {
3206 gimple parallel_stmt = last_stmt (region->entry);
3207 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3208 tree local_decls = DECL_STRUCT_FUNCTION (child_fun)->local_decls;
3209 tree block;
3210
3211 any_addressable_vars = 0;
3212 for (; local_decls; local_decls = TREE_CHAIN (local_decls))
3213 if (TREE_ADDRESSABLE (TREE_VALUE (local_decls)))
3214 {
3215 any_addressable_vars = 1;
3216 break;
3217 }
3218 for (block = gimple_block (stmt);
3219 !any_addressable_vars
3220 && block
3221 && TREE_CODE (block) == BLOCK;
3222 block = BLOCK_SUPERCONTEXT (block))
3223 {
3224 for (local_decls = BLOCK_VARS (block);
3225 local_decls;
3226 local_decls = TREE_CHAIN (local_decls))
3227 if (TREE_ADDRESSABLE (local_decls))
3228 {
3229 any_addressable_vars = 1;
3230 break;
3231 }
3232 if (block == gimple_block (parallel_stmt))
3233 break;
3234 }
3235 }
3236 if (!any_addressable_vars)
3237 gimple_omp_return_set_nowait (stmt);
3238 }
3239 }
3240 }
3241
3242 static void
3243 remove_exit_barriers (struct omp_region *region)
3244 {
3245 if (region->type == GIMPLE_OMP_PARALLEL)
3246 remove_exit_barrier (region);
3247
3248 if (region->inner)
3249 {
3250 region = region->inner;
3251 remove_exit_barriers (region);
3252 while (region->next)
3253 {
3254 region = region->next;
3255 remove_exit_barriers (region);
3256 }
3257 }
3258 }
3259
3260 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3261 calls. These can't be declared as const functions, but
3262 within one parallel body they are constant, so they can be
3263    transformed there into __builtin_omp_get_{thread_num,num_threads} (),
3264    which are declared const.  Similarly for the task body, except
3265    that in an untied task omp_get_thread_num () can change at any task
3266    scheduling point.  */
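/* For example (illustrative), inside an outlined parallel body a call

     tid = omp_get_thread_num ();

   is retargeted to the const builtin

     tid = __builtin_omp_get_thread_num ();

   which lets later passes CSE repeated queries of the thread number.  */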
3267
3268 static void
3269 optimize_omp_library_calls (gimple entry_stmt)
3270 {
3271 basic_block bb;
3272 gimple_stmt_iterator gsi;
3273 tree thr_num_id
3274 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3275 tree num_thr_id
3276 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3277 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3278 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3279 OMP_CLAUSE_UNTIED) != NULL);
3280
3281 FOR_EACH_BB (bb)
3282 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3283 {
3284 gimple call = gsi_stmt (gsi);
3285 tree decl;
3286
3287 if (is_gimple_call (call)
3288 && (decl = gimple_call_fndecl (call))
3289 && DECL_EXTERNAL (decl)
3290 && TREE_PUBLIC (decl)
3291 && DECL_INITIAL (decl) == NULL)
3292 {
3293 tree built_in;
3294
3295 if (DECL_NAME (decl) == thr_num_id)
3296 {
3297 /* In #pragma omp task untied omp_get_thread_num () can change
3298 during the execution of the task region. */
3299 if (untied_task)
3300 continue;
3301 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3302 }
3303 else if (DECL_NAME (decl) == num_thr_id)
3304 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3305 else
3306 continue;
3307
3308 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3309 || gimple_call_num_args (call) != 0)
3310 continue;
3311
3312 if (flag_exceptions && !TREE_NOTHROW (decl))
3313 continue;
3314
3315 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3316 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3317 TREE_TYPE (TREE_TYPE (built_in))))
3318 continue;
3319
3320 gimple_call_set_fndecl (call, built_in);
3321 }
3322 }
3323 }
3324
3325 /* Expand the OpenMP parallel or task directive starting at REGION. */
3326
3327 static void
3328 expand_omp_taskreg (struct omp_region *region)
3329 {
3330 basic_block entry_bb, exit_bb, new_bb;
3331 struct function *child_cfun;
3332 tree child_fn, block, t, ws_args, *tp;
3333 tree save_current;
3334 gimple_stmt_iterator gsi;
3335 gimple entry_stmt, stmt;
3336 edge e;
3337
3338 entry_stmt = last_stmt (region->entry);
3339 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3340 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3341   /* If this function has already been instrumented, make sure
3342      the child function isn't instrumented again.  */
3343 child_cfun->after_tree_profile = cfun->after_tree_profile;
3344
3345 entry_bb = region->entry;
3346 exit_bb = region->exit;
3347
3348 if (is_combined_parallel (region))
3349 ws_args = region->ws_args;
3350 else
3351 ws_args = NULL_TREE;
3352
3353 if (child_cfun->cfg)
3354 {
3355 /* Due to inlining, it may happen that we have already outlined
3356 the region, in which case all we need to do is make the
3357 sub-graph unreachable and emit the parallel call. */
3358 edge entry_succ_e, exit_succ_e;
3359 gimple_stmt_iterator gsi;
3360
3361 entry_succ_e = single_succ_edge (entry_bb);
3362
3363 gsi = gsi_last_bb (entry_bb);
3364 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3365 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3366 gsi_remove (&gsi, true);
3367
3368 new_bb = entry_bb;
3369 if (exit_bb)
3370 {
3371 exit_succ_e = single_succ_edge (exit_bb);
3372 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3373 }
3374 remove_edge_and_dominated_blocks (entry_succ_e);
3375 }
3376 else
3377 {
3378 /* If the parallel region needs data sent from the parent
3379 function, then the very first statement (except possible
3380 tree profile counter updates) of the parallel body
3381 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3382 &.OMP_DATA_O is passed as an argument to the child function,
3383 we need to replace it with the argument as seen by the child
3384 function.
3385
3386 In most cases, this will end up being the identity assignment
3387 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3388 a function call that has been inlined, the original PARM_DECL
3389       .OMP_DATA_I may have been converted into a different local
3390       variable, in which case we need to keep the assignment.  */
3391 if (gimple_omp_taskreg_data_arg (entry_stmt))
3392 {
3393 basic_block entry_succ_bb = single_succ (entry_bb);
3394 gimple_stmt_iterator gsi;
3395 tree arg, narg;
3396 gimple parcopy_stmt = NULL;
3397
3398 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3399 {
3400 gimple stmt;
3401
3402 gcc_assert (!gsi_end_p (gsi));
3403 stmt = gsi_stmt (gsi);
3404 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3405 continue;
3406
3407 if (gimple_num_ops (stmt) == 2)
3408 {
3409 tree arg = gimple_assign_rhs1 (stmt);
3410
3411 		  /* We're ignoring the subcode because we're
3412 effectively doing a STRIP_NOPS. */
3413
3414 if (TREE_CODE (arg) == ADDR_EXPR
3415 && TREE_OPERAND (arg, 0)
3416 == gimple_omp_taskreg_data_arg (entry_stmt))
3417 {
3418 parcopy_stmt = stmt;
3419 break;
3420 }
3421 }
3422 }
3423
3424 gcc_assert (parcopy_stmt != NULL);
3425 arg = DECL_ARGUMENTS (child_fn);
3426
3427 if (!gimple_in_ssa_p (cfun))
3428 {
3429 if (gimple_assign_lhs (parcopy_stmt) == arg)
3430 gsi_remove (&gsi, true);
3431 else
3432 {
3433 /* ?? Is setting the subcode really necessary ?? */
3434 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3435 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3436 }
3437 }
3438 else
3439 {
3440 /* If we are in ssa form, we must load the value from the default
3441 definition of the argument. That should not be defined now,
3442 since the argument is not used uninitialized. */
3443 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3444 narg = make_ssa_name (arg, gimple_build_nop ());
3445 set_default_def (arg, narg);
3446 /* ?? Is setting the subcode really necessary ?? */
3447 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3448 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3449 update_stmt (parcopy_stmt);
3450 }
3451 }
3452
3453 /* Declare local variables needed in CHILD_CFUN. */
3454 block = DECL_INITIAL (child_fn);
3455 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
3456       /* The gimplifier could record temporaries in the parallel/task block
3457 	 rather than in the containing function's local_decls chain,
3458 which would mean cgraph missed finalizing them. Do it now. */
3459 for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
3460 if (TREE_CODE (t) == VAR_DECL
3461 && TREE_STATIC (t)
3462 && !DECL_EXTERNAL (t))
3463 varpool_finalize_decl (t);
3464 DECL_SAVED_TREE (child_fn) = NULL;
3465 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3466 TREE_USED (block) = 1;
3467
3468 /* Reset DECL_CONTEXT on function arguments. */
3469 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3470 DECL_CONTEXT (t) = child_fn;
3471
3472 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3473 so that it can be moved to the child function. */
3474 gsi = gsi_last_bb (entry_bb);
3475 stmt = gsi_stmt (gsi);
3476 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3477 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3478 gsi_remove (&gsi, true);
3479 e = split_block (entry_bb, stmt);
3480 entry_bb = e->dest;
3481 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3482
3483 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3484 if (exit_bb)
3485 {
3486 gsi = gsi_last_bb (exit_bb);
3487 gcc_assert (!gsi_end_p (gsi)
3488 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3489 stmt = gimple_build_return (NULL);
3490 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3491 gsi_remove (&gsi, true);
3492 }
3493
3494 /* Move the parallel region into CHILD_CFUN. */
3495
3496 if (gimple_in_ssa_p (cfun))
3497 {
3498 push_cfun (child_cfun);
3499 init_tree_ssa (child_cfun);
3500 init_ssa_operands ();
3501 cfun->gimple_df->in_ssa_p = true;
3502 pop_cfun ();
3503 block = NULL_TREE;
3504 }
3505 else
3506 block = gimple_block (entry_stmt);
3507
3508 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3509 if (exit_bb)
3510 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3511
3512 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3513 for (tp = &child_cfun->local_decls; *tp; )
3514 if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
3515 tp = &TREE_CHAIN (*tp);
3516 else
3517 *tp = TREE_CHAIN (*tp);
3518
3519 /* Inform the callgraph about the new function. */
3520 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3521 = cfun->curr_properties;
3522 cgraph_add_new_function (child_fn, true);
3523
3524 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3525 fixed in a following pass. */
3526 push_cfun (child_cfun);
3527 save_current = current_function_decl;
3528 current_function_decl = child_fn;
3529 if (optimize)
3530 optimize_omp_library_calls (entry_stmt);
3531 rebuild_cgraph_edges ();
3532
3533 /* Some EH regions might become dead, see PR34608. If
3534 pass_cleanup_cfg isn't the first pass to happen with the
3535 new child, these dead EH edges might cause problems.
3536 Clean them up now. */
3537 if (flag_exceptions)
3538 {
3539 basic_block bb;
3540 bool changed = false;
3541
3542 FOR_EACH_BB (bb)
3543 changed |= gimple_purge_dead_eh_edges (bb);
3544 if (changed)
3545 cleanup_tree_cfg ();
3546 }
3547 if (gimple_in_ssa_p (cfun))
3548 update_ssa (TODO_update_ssa);
3549 current_function_decl = save_current;
3550 pop_cfun ();
3551 }
3552
3553   /* Emit a library call to launch the child threads.  */
3554 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3555 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3556 else
3557 expand_task_call (new_bb, entry_stmt);
3558 update_ssa (TODO_update_ssa_only_virtuals);
3559 }
3560
3561
3562 /* A subroutine of expand_omp_for. Generate code for a parallel
3563 loop with any schedule. Given parameters:
3564
3565 for (V = N1; V cond N2; V += STEP) BODY;
3566
3567 where COND is "<" or ">", we generate pseudocode
3568
3569 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3570 if (more) goto L0; else goto L3;
3571 L0:
3572 V = istart0;
3573 iend = iend0;
3574 L1:
3575 BODY;
3576 V += STEP;
3577 if (V cond iend) goto L1; else goto L2;
3578 L2:
3579 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3580 L3:
3581
3582 If this is a combined omp parallel loop, instead of the call to
3583 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3584
3585 For collapsed loops, given parameters:
3586 collapse(3)
3587 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3588 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3589 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3590 BODY;
3591
3592 we generate pseudocode
3593
3594 if (cond3 is <)
3595 adj = STEP3 - 1;
3596 else
3597 adj = STEP3 + 1;
3598 count3 = (adj + N32 - N31) / STEP3;
3599 if (cond2 is <)
3600 adj = STEP2 - 1;
3601 else
3602 adj = STEP2 + 1;
3603 count2 = (adj + N22 - N21) / STEP2;
3604 if (cond1 is <)
3605 adj = STEP1 - 1;
3606 else
3607 adj = STEP1 + 1;
3608 count1 = (adj + N12 - N11) / STEP1;
3609 count = count1 * count2 * count3;
3610 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3611 if (more) goto L0; else goto L3;
3612 L0:
3613 V = istart0;
3614 T = V;
3615 V3 = N31 + (T % count3) * STEP3;
3616 T = T / count3;
3617 V2 = N21 + (T % count2) * STEP2;
3618 T = T / count2;
3619 V1 = N11 + T * STEP1;
3620 iend = iend0;
3621 L1:
3622 BODY;
3623 V += 1;
3624 if (V < iend) goto L10; else goto L2;
3625 L10:
3626 V3 += STEP3;
3627 if (V3 cond3 N32) goto L1; else goto L11;
3628 L11:
3629 V3 = N31;
3630 V2 += STEP2;
3631 if (V2 cond2 N22) goto L1; else goto L12;
3632 L12:
3633 V2 = N21;
3634 V1 += STEP1;
3635 goto L1;
3636 L2:
3637 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3638 L3:
3639
3640 */
3641
3642 static void
3643 expand_omp_for_generic (struct omp_region *region,
3644 struct omp_for_data *fd,
3645 enum built_in_function start_fn,
3646 enum built_in_function next_fn)
3647 {
3648 tree type, istart0, iend0, iend;
3649 tree t, vmain, vback, bias = NULL_TREE;
3650 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3651 basic_block l2_bb = NULL, l3_bb = NULL;
3652 gimple_stmt_iterator gsi;
3653 gimple stmt;
3654 bool in_combined_parallel = is_combined_parallel (region);
3655 bool broken_loop = region->cont == NULL;
3656 edge e, ne;
3657 tree *counts = NULL;
3658 int i;
3659
3660 gcc_assert (!broken_loop || !in_combined_parallel);
3661 gcc_assert (fd->iter_type == long_integer_type_node
3662 || !in_combined_parallel);
3663
3664 type = TREE_TYPE (fd->loop.v);
3665 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3666 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3667 TREE_ADDRESSABLE (istart0) = 1;
3668 TREE_ADDRESSABLE (iend0) = 1;
3669 if (gimple_in_ssa_p (cfun))
3670 {
3671 add_referenced_var (istart0);
3672 add_referenced_var (iend0);
3673 }
3674
3675 /* See if we need to bias by LLONG_MIN. */
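  /* (Illustrative note: the unsigned long long runtime interface cannot
     represent negative iteration bounds directly, so when the signed
     bounds might be negative everything is offset by TYPE_MIN_VALUE;
     the bias is added before calling the GOMP_loop_ull_* functions and
     subtracted again when the values are assigned back to V.)  */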
3676 if (fd->iter_type == long_long_unsigned_type_node
3677 && TREE_CODE (type) == INTEGER_TYPE
3678 && !TYPE_UNSIGNED (type))
3679 {
3680 tree n1, n2;
3681
3682 if (fd->loop.cond_code == LT_EXPR)
3683 {
3684 n1 = fd->loop.n1;
3685 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3686 }
3687 else
3688 {
3689 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3690 n2 = fd->loop.n1;
3691 }
3692 if (TREE_CODE (n1) != INTEGER_CST
3693 || TREE_CODE (n2) != INTEGER_CST
3694 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3695 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3696 }
3697
3698 entry_bb = region->entry;
3699 cont_bb = region->cont;
3700 collapse_bb = NULL;
3701 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3702 gcc_assert (broken_loop
3703 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3704 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3705 l1_bb = single_succ (l0_bb);
3706 if (!broken_loop)
3707 {
3708 l2_bb = create_empty_bb (cont_bb);
3709 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3710 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3711 }
3712 else
3713 l2_bb = NULL;
3714 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3715 exit_bb = region->exit;
3716
3717 gsi = gsi_last_bb (entry_bb);
3718
3719 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3720 if (fd->collapse > 1)
3721 {
3722 /* collapsed loops need work for expansion in SSA form. */
3723 gcc_assert (!gimple_in_ssa_p (cfun));
3724 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3725 for (i = 0; i < fd->collapse; i++)
3726 {
3727 tree itype = TREE_TYPE (fd->loops[i].v);
3728
3729 if (POINTER_TYPE_P (itype))
3730 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3731 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3732 ? -1 : 1));
3733 t = fold_build2 (PLUS_EXPR, itype,
3734 fold_convert (itype, fd->loops[i].step), t);
3735 t = fold_build2 (PLUS_EXPR, itype, t,
3736 fold_convert (itype, fd->loops[i].n2));
3737 t = fold_build2 (MINUS_EXPR, itype, t,
3738 fold_convert (itype, fd->loops[i].n1));
3739 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3740 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3741 fold_build1 (NEGATE_EXPR, itype, t),
3742 fold_build1 (NEGATE_EXPR, itype,
3743 fold_convert (itype,
3744 fd->loops[i].step)));
3745 else
3746 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3747 fold_convert (itype, fd->loops[i].step));
3748 t = fold_convert (type, t);
3749 if (TREE_CODE (t) == INTEGER_CST)
3750 counts[i] = t;
3751 else
3752 {
3753 counts[i] = create_tmp_var (type, ".count");
3754 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3755 true, GSI_SAME_STMT);
3756 stmt = gimple_build_assign (counts[i], t);
3757 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3758 }
3759 if (SSA_VAR_P (fd->loop.n2))
3760 {
3761 if (i == 0)
3762 t = counts[0];
3763 else
3764 {
3765 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3766 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3767 true, GSI_SAME_STMT);
3768 }
3769 stmt = gimple_build_assign (fd->loop.n2, t);
3770 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3771 }
3772 }
3773 }
3774 if (in_combined_parallel)
3775 {
3776 /* In a combined parallel loop, emit a call to
3777 GOMP_loop_foo_next. */
3778 t = build_call_expr (built_in_decls[next_fn], 2,
3779 build_fold_addr_expr (istart0),
3780 build_fold_addr_expr (iend0));
3781 }
3782 else
3783 {
3784 tree t0, t1, t2, t3, t4;
3785 /* If this is not a combined parallel loop, emit a call to
3786 GOMP_loop_foo_start in ENTRY_BB. */
3787 t4 = build_fold_addr_expr (iend0);
3788 t3 = build_fold_addr_expr (istart0);
3789 t2 = fold_convert (fd->iter_type, fd->loop.step);
3790 if (POINTER_TYPE_P (type)
3791 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3792 {
3793           /* Avoid casting pointers to an integer of a different size.  */
3794 tree itype
3795 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3796 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3797 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3798 }
3799 else
3800 {
3801 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3802 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3803 }
3804 if (bias)
3805 {
3806 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3807 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3808 }
3809 if (fd->iter_type == long_integer_type_node)
3810 {
3811 if (fd->chunk_size)
3812 {
3813 t = fold_convert (fd->iter_type, fd->chunk_size);
3814 t = build_call_expr (built_in_decls[start_fn], 6,
3815 t0, t1, t2, t, t3, t4);
3816 }
3817 else
3818 t = build_call_expr (built_in_decls[start_fn], 5,
3819 t0, t1, t2, t3, t4);
3820 }
3821 else
3822 {
3823 tree t5;
3824 tree c_bool_type;
3825
3826           /* The GOMP_loop_ull_*start functions have an additional boolean
3827 argument, true for < loops and false for > loops.
3828 In Fortran, the C bool type can be different from
3829 boolean_type_node. */
3830 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3831 t5 = build_int_cst (c_bool_type,
3832 fd->loop.cond_code == LT_EXPR ? 1 : 0);
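          /* Hedged illustration (operands are placeholders): for a "<" loop
             with schedule(dynamic,chunk) the call built below is shaped like
               GOMP_loop_ull_dynamic_start (true, n1, n2, step, chunk,
                                            &istart0, &iend0);  */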
3833 if (fd->chunk_size)
3834 {
3835 t = fold_convert (fd->iter_type, fd->chunk_size);
3836 t = build_call_expr (built_in_decls[start_fn], 7,
3837 t5, t0, t1, t2, t, t3, t4);
3838 }
3839 else
3840 t = build_call_expr (built_in_decls[start_fn], 6,
3841 t5, t0, t1, t2, t3, t4);
3842 }
3843 }
3844 if (TREE_TYPE (t) != boolean_type_node)
3845 t = fold_build2 (NE_EXPR, boolean_type_node,
3846 t, build_int_cst (TREE_TYPE (t), 0));
3847 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3848 true, GSI_SAME_STMT);
3849 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3850
3851 /* Remove the GIMPLE_OMP_FOR statement. */
3852 gsi_remove (&gsi, true);
3853
3854 /* Iteration setup for sequential loop goes in L0_BB. */
3855 gsi = gsi_start_bb (l0_bb);
3856 t = istart0;
3857 if (bias)
3858 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3859 if (POINTER_TYPE_P (type))
3860 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3861 0), t);
3862 t = fold_convert (type, t);
3863 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3864 false, GSI_CONTINUE_LINKING);
3865 stmt = gimple_build_assign (fd->loop.v, t);
3866 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3867
3868 t = iend0;
3869 if (bias)
3870 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3871 if (POINTER_TYPE_P (type))
3872 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3873 0), t);
3874 t = fold_convert (type, t);
3875 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3876 false, GSI_CONTINUE_LINKING);
3877 if (fd->collapse > 1)
3878 {
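      /* Rough sketch of what the loop below computes (hypothetical trip
         counts, not taken from this function): for collapse(2) with
         counts[] = { 4, 5 }, the runtime hands back a single index V in
         [0, 20), and the original induction variables are recovered as
           loops[1].v = loops[1].n1 + (V % 5) * loops[1].step;
           loops[0].v = loops[0].n1 + (V / 5) * loops[0].step;
         working from the innermost loop outwards.  */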
3879 tree tem = create_tmp_var (type, ".tem");
3880
3881 stmt = gimple_build_assign (tem, fd->loop.v);
3882 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3883 for (i = fd->collapse - 1; i >= 0; i--)
3884 {
3885 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3886 itype = vtype;
3887 if (POINTER_TYPE_P (vtype))
3888 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3889 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3890 t = fold_convert (itype, t);
3891 t = fold_build2 (MULT_EXPR, itype, t,
3892 fold_convert (itype, fd->loops[i].step));
3893 if (POINTER_TYPE_P (vtype))
3894 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3895 fd->loops[i].n1, fold_convert (sizetype, t));
3896 else
3897 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3898 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3899 false, GSI_CONTINUE_LINKING);
3900 stmt = gimple_build_assign (fd->loops[i].v, t);
3901 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3902 if (i != 0)
3903 {
3904 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3905 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3906 false, GSI_CONTINUE_LINKING);
3907 stmt = gimple_build_assign (tem, t);
3908 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3909 }
3910 }
3911 }
3912
3913 if (!broken_loop)
3914 {
3915 /* Code to control the increment and predicate for the sequential
3916 loop goes in the CONT_BB. */
3917 gsi = gsi_last_bb (cont_bb);
3918 stmt = gsi_stmt (gsi);
3919 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3920 vmain = gimple_omp_continue_control_use (stmt);
3921 vback = gimple_omp_continue_control_def (stmt);
3922
3923 if (POINTER_TYPE_P (type))
3924 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3925 fold_convert (sizetype, fd->loop.step));
3926 else
3927 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3928 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3929 true, GSI_SAME_STMT);
3930 stmt = gimple_build_assign (vback, t);
3931 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3932
3933 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3934 stmt = gimple_build_cond_empty (t);
3935 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3936
3937 /* Remove GIMPLE_OMP_CONTINUE. */
3938 gsi_remove (&gsi, true);
3939
3940 if (fd->collapse > 1)
3941 {
3942 basic_block last_bb, bb;
3943
3944 last_bb = cont_bb;
3945 for (i = fd->collapse - 1; i >= 0; i--)
3946 {
3947 tree vtype = TREE_TYPE (fd->loops[i].v);
3948
3949 bb = create_empty_bb (last_bb);
3950 gsi = gsi_start_bb (bb);
3951
3952 if (i < fd->collapse - 1)
3953 {
3954 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3955 e->probability = REG_BR_PROB_BASE / 8;
3956
3957 t = fd->loops[i + 1].n1;
3958 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3959 false, GSI_CONTINUE_LINKING);
3960 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3961 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3962 }
3963 else
3964 collapse_bb = bb;
3965
3966 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3967
3968 if (POINTER_TYPE_P (vtype))
3969 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3970 fd->loops[i].v,
3971 fold_convert (sizetype, fd->loops[i].step));
3972 else
3973 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3974 fd->loops[i].step);
3975 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3976 false, GSI_CONTINUE_LINKING);
3977 stmt = gimple_build_assign (fd->loops[i].v, t);
3978 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3979
3980 if (i > 0)
3981 {
3982 t = fd->loops[i].n2;
3983 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3984 false, GSI_CONTINUE_LINKING);
3985 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
3986 fd->loops[i].v, t);
3987 stmt = gimple_build_cond_empty (t);
3988 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3989 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
3990 e->probability = REG_BR_PROB_BASE * 7 / 8;
3991 }
3992 else
3993 make_edge (bb, l1_bb, EDGE_FALLTHRU);
3994 last_bb = bb;
3995 }
3996 }
3997
3998 /* Emit code to get the next parallel iteration in L2_BB. */
3999 gsi = gsi_start_bb (l2_bb);
4000
4001 t = build_call_expr (built_in_decls[next_fn], 2,
4002 build_fold_addr_expr (istart0),
4003 build_fold_addr_expr (iend0));
4004 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4005 false, GSI_CONTINUE_LINKING);
4006 if (TREE_TYPE (t) != boolean_type_node)
4007 t = fold_build2 (NE_EXPR, boolean_type_node,
4008 t, build_int_cst (TREE_TYPE (t), 0));
4009 stmt = gimple_build_cond_empty (t);
4010 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4011 }
4012
4013 /* Add the loop cleanup function. */
4014 gsi = gsi_last_bb (exit_bb);
4015 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4016 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
4017 else
4018 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
4019 stmt = gimple_build_call (t, 0);
4020 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4021 gsi_remove (&gsi, true);
4022
4023 /* Connect the new blocks. */
4024 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4025 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4026
4027 if (!broken_loop)
4028 {
4029 gimple_seq phis;
4030
4031 e = find_edge (cont_bb, l3_bb);
4032 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4033
4034 phis = phi_nodes (l3_bb);
4035 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4036 {
4037 gimple phi = gsi_stmt (gsi);
4038 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4039 PHI_ARG_DEF_FROM_EDGE (phi, e));
4040 }
4041 remove_edge (e);
4042
4043 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4044 if (fd->collapse > 1)
4045 {
4046 e = find_edge (cont_bb, l1_bb);
4047 remove_edge (e);
4048 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4049 }
4050 else
4051 {
4052 e = find_edge (cont_bb, l1_bb);
4053 e->flags = EDGE_TRUE_VALUE;
4054 }
4055 e->probability = REG_BR_PROB_BASE * 7 / 8;
4056 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4057 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4058
4059 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4060 recompute_dominator (CDI_DOMINATORS, l2_bb));
4061 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4062 recompute_dominator (CDI_DOMINATORS, l3_bb));
4063 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4064 recompute_dominator (CDI_DOMINATORS, l0_bb));
4065 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4066 recompute_dominator (CDI_DOMINATORS, l1_bb));
4067 }
4068 }
4069
4070
4071 /* A subroutine of expand_omp_for. Generate code for a parallel
4072 loop with static schedule and no specified chunk size. Given
4073 parameters:
4074
4075 for (V = N1; V cond N2; V += STEP) BODY;
4076
4077 where COND is "<" or ">", we generate pseudocode
4078
4079 if (cond is <)
4080 adj = STEP - 1;
4081 else
4082 adj = STEP + 1;
4083 if ((__typeof (V)) -1 > 0 && cond is >)
4084 n = -(adj + N2 - N1) / -STEP;
4085 else
4086 n = (adj + N2 - N1) / STEP;
4087 q = n / nthreads;
4088 q += (q * nthreads != n);
4089 s0 = q * threadid;
4090 e0 = min(s0 + q, n);
4091 V = s0 * STEP + N1;
4092 if (s0 >= e0) goto L2; else goto L0;
4093 L0:
4094 e = e0 * STEP + N1;
4095 L1:
4096 BODY;
4097 V += STEP;
4098 if (V cond e) goto L1;
4099 L2:
4100 */
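/* A hedged worked example of the partition above (numbers purely
   illustrative): with n = 10 iterations and nthreads = 4, q = 10/4 = 2 is
   bumped to 3 because 2*4 != 10, so thread 0 gets [0,3), thread 1 [3,6),
   thread 2 [6,9) and thread 3 [9,10); any thread whose s0 >= e0 jumps
   straight to L2 without touching the body.  */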
4101
4102 static void
4103 expand_omp_for_static_nochunk (struct omp_region *region,
4104 struct omp_for_data *fd)
4105 {
4106 tree n, q, s0, e0, e, t, nthreads, threadid;
4107 tree type, itype, vmain, vback;
4108 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4109 basic_block fin_bb;
4110 gimple_stmt_iterator gsi;
4111 gimple stmt;
4112
4113 itype = type = TREE_TYPE (fd->loop.v);
4114 if (POINTER_TYPE_P (type))
4115 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4116
4117 entry_bb = region->entry;
4118 cont_bb = region->cont;
4119 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4120 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4121 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4122 body_bb = single_succ (seq_start_bb);
4123 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4124 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4125 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4126 exit_bb = region->exit;
4127
4128 /* Iteration space partitioning goes in ENTRY_BB. */
4129 gsi = gsi_last_bb (entry_bb);
4130 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4131
4132 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4133 t = fold_convert (itype, t);
4134 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4135 true, GSI_SAME_STMT);
4136
4137 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4138 t = fold_convert (itype, t);
4139 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4140 true, GSI_SAME_STMT);
4141
4142 fd->loop.n1
4143 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4144 true, NULL_TREE, true, GSI_SAME_STMT);
4145 fd->loop.n2
4146 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4147 true, NULL_TREE, true, GSI_SAME_STMT);
4148 fd->loop.step
4149 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4150 true, NULL_TREE, true, GSI_SAME_STMT);
4151
4152 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4153 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4154 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4155 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4156 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4157 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4158 fold_build1 (NEGATE_EXPR, itype, t),
4159 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4160 else
4161 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4162 t = fold_convert (itype, t);
4163 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4164
4165 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4166 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4167
4168 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4169 t = fold_build2 (NE_EXPR, itype, t, n);
4170 t = fold_build2 (PLUS_EXPR, itype, q, t);
4171 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4172
4173 t = build2 (MULT_EXPR, itype, q, threadid);
4174 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4175
4176 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4177 t = fold_build2 (MIN_EXPR, itype, t, n);
4178 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4179
4180 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4181 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4182
4183 /* Remove the GIMPLE_OMP_FOR statement. */
4184 gsi_remove (&gsi, true);
4185
4186 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4187 gsi = gsi_start_bb (seq_start_bb);
4188
4189 t = fold_convert (itype, s0);
4190 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4191 if (POINTER_TYPE_P (type))
4192 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4193 fold_convert (sizetype, t));
4194 else
4195 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4196 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4197 false, GSI_CONTINUE_LINKING);
4198 stmt = gimple_build_assign (fd->loop.v, t);
4199 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4200
4201 t = fold_convert (itype, e0);
4202 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4203 if (POINTER_TYPE_P (type))
4204 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4205 fold_convert (sizetype, t));
4206 else
4207 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4208 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4209 false, GSI_CONTINUE_LINKING);
4210
4211 /* The code controlling the sequential loop replaces the
4212 GIMPLE_OMP_CONTINUE. */
4213 gsi = gsi_last_bb (cont_bb);
4214 stmt = gsi_stmt (gsi);
4215 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4216 vmain = gimple_omp_continue_control_use (stmt);
4217 vback = gimple_omp_continue_control_def (stmt);
4218
4219 if (POINTER_TYPE_P (type))
4220 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4221 fold_convert (sizetype, fd->loop.step));
4222 else
4223 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4224 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4225 true, GSI_SAME_STMT);
4226 stmt = gimple_build_assign (vback, t);
4227 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4228
4229 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4230 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4231
4232 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4233 gsi_remove (&gsi, true);
4234
4235 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4236 gsi = gsi_last_bb (exit_bb);
4237 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4238 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4239 false, GSI_SAME_STMT);
4240 gsi_remove (&gsi, true);
4241
4242 /* Connect all the blocks. */
4243 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4244 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4245
4246 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4247 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4248
4249 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4250 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4251 recompute_dominator (CDI_DOMINATORS, body_bb));
4252 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4253 recompute_dominator (CDI_DOMINATORS, fin_bb));
4254 }
4255
4256
4257 /* A subroutine of expand_omp_for. Generate code for a parallel
4258 loop with static schedule and a specified chunk size. Given
4259 parameters:
4260
4261 for (V = N1; V cond N2; V += STEP) BODY;
4262
4263 where COND is "<" or ">", we generate pseudocode
4264
4265 if (cond is <)
4266 adj = STEP - 1;
4267 else
4268 adj = STEP + 1;
4269 if ((__typeof (V)) -1 > 0 && cond is >)
4270 n = -(adj + N2 - N1) / -STEP;
4271 else
4272 n = (adj + N2 - N1) / STEP;
4273 trip = 0;
4274 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4275 here so that V is defined
4276 if the loop is not entered
4277 L0:
4278 s0 = (trip * nthreads + threadid) * CHUNK;
4279 e0 = min(s0 + CHUNK, n);
4280 if (s0 < n) goto L1; else goto L4;
4281 L1:
4282 V = s0 * STEP + N1;
4283 e = e0 * STEP + N1;
4284 L2:
4285 BODY;
4286 V += STEP;
4287 if (V cond e) goto L2; else goto L3;
4288 L3:
4289 trip += 1;
4290 goto L0;
4291 L4:
4292 */
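/* A hedged worked example (illustrative numbers only): with n = 10,
   nthreads = 2 and CHUNK = 3, trip 0 gives thread 0 the range [0,3) and
   thread 1 [3,6); trip 1 gives thread 0 [6,9) and thread 1 [9,10); on
   trip 2 both threads find s0 >= n and leave through L4.  */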
4293
4294 static void
4295 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4296 {
4297 tree n, s0, e0, e, t;
4298 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4299 tree type, itype, v_main, v_back, v_extra;
4300 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4301 basic_block trip_update_bb, cont_bb, fin_bb;
4302 gimple_stmt_iterator si;
4303 gimple stmt;
4304 edge se;
4305
4306 itype = type = TREE_TYPE (fd->loop.v);
4307 if (POINTER_TYPE_P (type))
4308 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4309
4310 entry_bb = region->entry;
4311 se = split_block (entry_bb, last_stmt (entry_bb));
4312 entry_bb = se->src;
4313 iter_part_bb = se->dest;
4314 cont_bb = region->cont;
4315 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4316 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4317 == FALLTHRU_EDGE (cont_bb)->dest);
4318 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4319 body_bb = single_succ (seq_start_bb);
4320 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4321 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4322 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4323 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4324 exit_bb = region->exit;
4325
4326 /* Trip and adjustment setup goes in ENTRY_BB. */
4327 si = gsi_last_bb (entry_bb);
4328 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4329
4330 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4331 t = fold_convert (itype, t);
4332 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4333 true, GSI_SAME_STMT);
4334
4335 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4336 t = fold_convert (itype, t);
4337 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4338 true, GSI_SAME_STMT);
4339
4340 fd->loop.n1
4341 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4342 true, NULL_TREE, true, GSI_SAME_STMT);
4343 fd->loop.n2
4344 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4345 true, NULL_TREE, true, GSI_SAME_STMT);
4346 fd->loop.step
4347 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4348 true, NULL_TREE, true, GSI_SAME_STMT);
4349 fd->chunk_size
4350 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4351 true, NULL_TREE, true, GSI_SAME_STMT);
4352
4353 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4354 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4355 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4356 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4357 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4358 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4359 fold_build1 (NEGATE_EXPR, itype, t),
4360 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4361 else
4362 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4363 t = fold_convert (itype, t);
4364 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4365 true, GSI_SAME_STMT);
4366
4367 trip_var = create_tmp_var (itype, ".trip");
4368 if (gimple_in_ssa_p (cfun))
4369 {
4370 add_referenced_var (trip_var);
4371 trip_init = make_ssa_name (trip_var, NULL);
4372 trip_main = make_ssa_name (trip_var, NULL);
4373 trip_back = make_ssa_name (trip_var, NULL);
4374 }
4375 else
4376 {
4377 trip_init = trip_var;
4378 trip_main = trip_var;
4379 trip_back = trip_var;
4380 }
4381
4382 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4383 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4384
4385 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4386 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4387 if (POINTER_TYPE_P (type))
4388 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4389 fold_convert (sizetype, t));
4390 else
4391 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4392 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4393 true, GSI_SAME_STMT);
4394
4395 /* Remove the GIMPLE_OMP_FOR. */
4396 gsi_remove (&si, true);
4397
4398 /* Iteration space partitioning goes in ITER_PART_BB. */
4399 si = gsi_last_bb (iter_part_bb);
4400
4401 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4402 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4403 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4404 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4405 false, GSI_CONTINUE_LINKING);
4406
4407 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4408 t = fold_build2 (MIN_EXPR, itype, t, n);
4409 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4410 false, GSI_CONTINUE_LINKING);
4411
4412 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4413 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4414
4415 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4416 si = gsi_start_bb (seq_start_bb);
4417
4418 t = fold_convert (itype, s0);
4419 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4420 if (POINTER_TYPE_P (type))
4421 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4422 fold_convert (sizetype, t));
4423 else
4424 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4425 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4426 false, GSI_CONTINUE_LINKING);
4427 stmt = gimple_build_assign (fd->loop.v, t);
4428 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4429
4430 t = fold_convert (itype, e0);
4431 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4432 if (POINTER_TYPE_P (type))
4433 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4434 fold_convert (sizetype, t));
4435 else
4436 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4437 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4438 false, GSI_CONTINUE_LINKING);
4439
4440 /* The code controlling the sequential loop goes in CONT_BB,
4441 replacing the GIMPLE_OMP_CONTINUE. */
4442 si = gsi_last_bb (cont_bb);
4443 stmt = gsi_stmt (si);
4444 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4445 v_main = gimple_omp_continue_control_use (stmt);
4446 v_back = gimple_omp_continue_control_def (stmt);
4447
4448 if (POINTER_TYPE_P (type))
4449 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4450 fold_convert (sizetype, fd->loop.step));
4451 else
4452 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4453 stmt = gimple_build_assign (v_back, t);
4454 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4455
4456 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4457 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4458
4459 /* Remove GIMPLE_OMP_CONTINUE. */
4460 gsi_remove (&si, true);
4461
4462 /* Trip update code goes into TRIP_UPDATE_BB. */
4463 si = gsi_start_bb (trip_update_bb);
4464
4465 t = build_int_cst (itype, 1);
4466 t = build2 (PLUS_EXPR, itype, trip_main, t);
4467 stmt = gimple_build_assign (trip_back, t);
4468 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4469
4470 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4471 si = gsi_last_bb (exit_bb);
4472 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4473 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4474 false, GSI_SAME_STMT);
4475 gsi_remove (&si, true);
4476
4477 /* Connect the new blocks. */
4478 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4479 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4480
4481 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4482 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4483
4484 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4485
4486 if (gimple_in_ssa_p (cfun))
4487 {
4488 gimple_stmt_iterator psi;
4489 gimple phi;
4490 edge re, ene;
4491 edge_var_map_vector head;
4492 edge_var_map *vm;
4493 size_t i;
4494
4495 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4496 remove arguments of the phi nodes in fin_bb. We need to create
4497 appropriate phi nodes in iter_part_bb instead. */
4498 se = single_pred_edge (fin_bb);
4499 re = single_succ_edge (trip_update_bb);
4500 head = redirect_edge_var_map_vector (re);
4501 ene = single_succ_edge (entry_bb);
4502
4503 psi = gsi_start_phis (fin_bb);
4504 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4505 gsi_next (&psi), ++i)
4506 {
4507 gimple nphi;
4508 source_location locus;
4509
4510 phi = gsi_stmt (psi);
4511 t = gimple_phi_result (phi);
4512 gcc_assert (t == redirect_edge_var_map_result (vm));
4513 nphi = create_phi_node (t, iter_part_bb);
4514 SSA_NAME_DEF_STMT (t) = nphi;
4515
4516 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4517 locus = gimple_phi_arg_location_from_edge (phi, se);
4518
4519 /* A special case -- fd->loop.v is not yet computed in
4520 iter_part_bb, we need to use v_extra instead. */
4521 if (t == fd->loop.v)
4522 t = v_extra;
4523 add_phi_arg (nphi, t, ene, locus);
4524 locus = redirect_edge_var_map_location (vm);
4525 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4526 }
4527       gcc_assert (gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4528 redirect_edge_var_map_clear (re);
4529 while (1)
4530 {
4531 psi = gsi_start_phis (fin_bb);
4532 if (gsi_end_p (psi))
4533 break;
4534 remove_phi_node (&psi, false);
4535 }
4536
4537 /* Make phi node for trip. */
4538 phi = create_phi_node (trip_main, iter_part_bb);
4539 SSA_NAME_DEF_STMT (trip_main) = phi;
4540 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4541 UNKNOWN_LOCATION);
4542 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4543 UNKNOWN_LOCATION);
4544 }
4545
4546 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4547 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4548 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4549 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4550 recompute_dominator (CDI_DOMINATORS, fin_bb));
4551 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4552 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4553 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4554 recompute_dominator (CDI_DOMINATORS, body_bb));
4555 }
4556
4557
4558 /* Expand the OpenMP loop defined by REGION. */
4559
4560 static void
4561 expand_omp_for (struct omp_region *region)
4562 {
4563 struct omp_for_data fd;
4564 struct omp_for_data_loop *loops;
4565
4566 loops
4567 = (struct omp_for_data_loop *)
4568 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4569 * sizeof (struct omp_for_data_loop));
4570 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4571 region->sched_kind = fd.sched_kind;
4572
4573 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4574 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4575 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4576 if (region->cont)
4577 {
4578 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4579 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4580 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4581 }
4582
4583 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4584 && !fd.have_ordered
4585 && fd.collapse == 1
4586 && region->cont != NULL)
4587 {
4588 if (fd.chunk_size == NULL)
4589 expand_omp_for_static_nochunk (region, &fd);
4590 else
4591 expand_omp_for_static_chunk (region, &fd);
4592 }
4593 else
4594 {
4595 int fn_index, start_ix, next_ix;
4596
4597 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4598 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4599 ? 3 : fd.sched_kind;
4600 fn_index += fd.have_ordered * 4;
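      /* For instance (assuming the usual ordering of the libgomp builtins,
         where the static/dynamic/guided/runtime entry points are declared
         consecutively and the "ordered" variants follow them):
         schedule(dynamic) selects GOMP_loop_dynamic_start/_next, and the
         same schedule on an ordered loop selects
         GOMP_loop_ordered_dynamic_start/_next.  */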
4601 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4602 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4603 if (fd.iter_type == long_long_unsigned_type_node)
4604 {
4605 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4606 - BUILT_IN_GOMP_LOOP_STATIC_START;
4607 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4608 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4609 }
4610 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4611 (enum built_in_function) next_ix);
4612 }
4613
4614 update_ssa (TODO_update_ssa_only_virtuals);
4615 }
4616
4617
4618 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4619
4620 v = GOMP_sections_start (n);
4621 L0:
4622 switch (v)
4623 {
4624 case 0:
4625 goto L2;
4626 case 1:
4627 section 1;
4628 goto L1;
4629 case 2:
4630 ...
4631 case n:
4632 ...
4633 default:
4634 abort ();
4635 }
4636 L1:
4637 v = GOMP_sections_next ();
4638 goto L0;
4639 L2:
4640 reduction;
4641
4642 If this is a combined parallel sections, replace the call to
4643 GOMP_sections_start with call to GOMP_sections_next. */
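/* As a rough illustration (input not taken from this file):

     #pragma omp sections
     {
       #pragma omp section
         foo ();
       #pragma omp section
         bar ();
     }

   is dispatched through the switch above: GOMP_sections_start (2) (or
   GOMP_sections_next () in the combined case) returns 1 to run foo, 2 to
   run bar, and 0 once no work is left, while any other value aborts.  */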
4644
4645 static void
4646 expand_omp_sections (struct omp_region *region)
4647 {
4648 tree t, u, vin = NULL, vmain, vnext, l2;
4649 VEC (tree,heap) *label_vec;
4650 unsigned len;
4651 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4652 gimple_stmt_iterator si, switch_si;
4653 gimple sections_stmt, stmt, cont;
4654 edge_iterator ei;
4655 edge e;
4656 struct omp_region *inner;
4657 unsigned i, casei;
4658 bool exit_reachable = region->cont != NULL;
4659
4660 gcc_assert (exit_reachable == (region->exit != NULL));
4661 entry_bb = region->entry;
4662 l0_bb = single_succ (entry_bb);
4663 l1_bb = region->cont;
4664 l2_bb = region->exit;
4665 if (exit_reachable)
4666 {
4667 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4668 l2 = gimple_block_label (l2_bb);
4669 else
4670 {
4671 /* This can happen if there are reductions. */
4672 len = EDGE_COUNT (l0_bb->succs);
4673 gcc_assert (len > 0);
4674 e = EDGE_SUCC (l0_bb, len - 1);
4675 si = gsi_last_bb (e->dest);
4676 l2 = NULL_TREE;
4677 if (gsi_end_p (si)
4678 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4679 l2 = gimple_block_label (e->dest);
4680 else
4681 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4682 {
4683 si = gsi_last_bb (e->dest);
4684 if (gsi_end_p (si)
4685 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4686 {
4687 l2 = gimple_block_label (e->dest);
4688 break;
4689 }
4690 }
4691 }
4692 default_bb = create_empty_bb (l1_bb->prev_bb);
4693 }
4694 else
4695 {
4696 default_bb = create_empty_bb (l0_bb);
4697 l2 = gimple_block_label (default_bb);
4698 }
4699
4700 /* We will build a switch() with enough cases for all the
4701    GIMPLE_OMP_SECTION regions, a '0' case to signal that there is no more
4702    work, and a default case to abort if something goes wrong.  */
4703 len = EDGE_COUNT (l0_bb->succs);
4704
4705 /* Use VEC_quick_push on label_vec throughout, since we know the size
4706 in advance. */
4707 label_vec = VEC_alloc (tree, heap, len);
4708
4709 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4710 GIMPLE_OMP_SECTIONS statement. */
4711 si = gsi_last_bb (entry_bb);
4712 sections_stmt = gsi_stmt (si);
4713 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4714 vin = gimple_omp_sections_control (sections_stmt);
4715 if (!is_combined_parallel (region))
4716 {
4717 /* If we are not inside a combined parallel+sections region,
4718 call GOMP_sections_start. */
4719 t = build_int_cst (unsigned_type_node,
4720 exit_reachable ? len - 1 : len);
4721 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4722 stmt = gimple_build_call (u, 1, t);
4723 }
4724 else
4725 {
4726 /* Otherwise, call GOMP_sections_next. */
4727 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4728 stmt = gimple_build_call (u, 0);
4729 }
4730 gimple_call_set_lhs (stmt, vin);
4731 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4732 gsi_remove (&si, true);
4733
4734 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4735 L0_BB. */
4736 switch_si = gsi_last_bb (l0_bb);
4737 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4738 if (exit_reachable)
4739 {
4740 cont = last_stmt (l1_bb);
4741 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4742 vmain = gimple_omp_continue_control_use (cont);
4743 vnext = gimple_omp_continue_control_def (cont);
4744 }
4745 else
4746 {
4747 vmain = vin;
4748 vnext = NULL_TREE;
4749 }
4750
4751 i = 0;
4752 if (exit_reachable)
4753 {
4754 t = build3 (CASE_LABEL_EXPR, void_type_node,
4755 build_int_cst (unsigned_type_node, 0), NULL, l2);
4756 VEC_quick_push (tree, label_vec, t);
4757 i++;
4758 }
4759
4760 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4761 for (inner = region->inner, casei = 1;
4762 inner;
4763 inner = inner->next, i++, casei++)
4764 {
4765 basic_block s_entry_bb, s_exit_bb;
4766
4767 /* Skip optional reduction region. */
4768 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4769 {
4770 --i;
4771 --casei;
4772 continue;
4773 }
4774
4775 s_entry_bb = inner->entry;
4776 s_exit_bb = inner->exit;
4777
4778 t = gimple_block_label (s_entry_bb);
4779 u = build_int_cst (unsigned_type_node, casei);
4780 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4781 VEC_quick_push (tree, label_vec, u);
4782
4783 si = gsi_last_bb (s_entry_bb);
4784 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4785 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4786 gsi_remove (&si, true);
4787 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4788
4789 if (s_exit_bb == NULL)
4790 continue;
4791
4792 si = gsi_last_bb (s_exit_bb);
4793 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4794 gsi_remove (&si, true);
4795
4796 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4797 }
4798
4799 /* Error handling code goes in DEFAULT_BB. */
4800 t = gimple_block_label (default_bb);
4801 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4802 make_edge (l0_bb, default_bb, 0);
4803
4804 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4805 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4806 gsi_remove (&switch_si, true);
4807 VEC_free (tree, heap, label_vec);
4808
4809 si = gsi_start_bb (default_bb);
4810 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4811 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4812
4813 if (exit_reachable)
4814 {
4815 /* Code to get the next section goes in L1_BB. */
4816 si = gsi_last_bb (l1_bb);
4817 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4818
4819 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4820 gimple_call_set_lhs (stmt, vnext);
4821 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4822 gsi_remove (&si, true);
4823
4824 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4825
4826 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4827 si = gsi_last_bb (l2_bb);
4828 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4829 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4830 else
4831 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4832 stmt = gimple_build_call (t, 0);
4833 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4834 gsi_remove (&si, true);
4835 }
4836
4837 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4838 }
4839
4840
4841 /* Expand code for an OpenMP single directive. We've already expanded
4842    much of the code; here we simply place the GOMP_barrier call.  */
4843
4844 static void
4845 expand_omp_single (struct omp_region *region)
4846 {
4847 basic_block entry_bb, exit_bb;
4848 gimple_stmt_iterator si;
4849 bool need_barrier = false;
4850
4851 entry_bb = region->entry;
4852 exit_bb = region->exit;
4853
4854 si = gsi_last_bb (entry_bb);
4855 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4856 be removed. We need to ensure that the thread that entered the single
4857 does not exit before the data is copied out by the other threads. */
4858 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4859 OMP_CLAUSE_COPYPRIVATE))
4860 need_barrier = true;
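  /* Illustrative note: with "#pragma omp single copyprivate (x)" the
     thread that executes the single body publishes x through a runtime
     copy buffer (the GOMP_single_copy_start/_end sequence emitted during
     lowering), and this trailing barrier keeps that thread from leaving
     the region before every other thread has copied x out, so it must
     stay even if the GIMPLE_OMP_RETURN was marked nowait.  */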
4861 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4862 gsi_remove (&si, true);
4863 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4864
4865 si = gsi_last_bb (exit_bb);
4866 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4867 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4868 false, GSI_SAME_STMT);
4869 gsi_remove (&si, true);
4870 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4871 }
4872
4873
4874 /* Generic expansion for OpenMP synchronization directives: master,
4875 ordered and critical. All we need to do here is remove the entry
4876 and exit markers for REGION. */
4877
4878 static void
4879 expand_omp_synch (struct omp_region *region)
4880 {
4881 basic_block entry_bb, exit_bb;
4882 gimple_stmt_iterator si;
4883
4884 entry_bb = region->entry;
4885 exit_bb = region->exit;
4886
4887 si = gsi_last_bb (entry_bb);
4888 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4889 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4890 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4891 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4892 gsi_remove (&si, true);
4893 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4894
4895 if (exit_bb)
4896 {
4897 si = gsi_last_bb (exit_bb);
4898 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4899 gsi_remove (&si, true);
4900 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4901 }
4902 }
4903
4904 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4905 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4906 size of the data type, and thus usable to find the index of the builtin
4907 decl. Returns false if the expression is not of the proper form. */
4908
4909 static bool
4910 expand_omp_atomic_fetch_op (basic_block load_bb,
4911 tree addr, tree loaded_val,
4912 tree stored_val, int index)
4913 {
4914 enum built_in_function base;
4915 tree decl, itype, call;
4916 enum insn_code *optab;
4917 tree rhs;
4918 basic_block store_bb = single_succ (load_bb);
4919 gimple_stmt_iterator gsi;
4920 gimple stmt;
4921 location_t loc;
4922
4923 /* We expect to find the following sequences:
4924
4925 load_bb:
4926 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4927
4928 store_bb:
4929 val = tmp OP something; (or: something OP tmp)
4930      GIMPLE_OMP_ATOMIC_STORE (val)
4931
4932   ??? FIXME: Allow a more flexible sequence.
4933 Perhaps use data flow to pick the statements.
4934
4935 */
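  /* For instance (a hypothetical input, not taken from this pass):

       #pragma omp atomic
       x += 1;

     reaches this point as GIMPLE_OMP_ATOMIC_LOAD (tmp, &x) followed by
     "val = tmp + 1; GIMPLE_OMP_ATOMIC_STORE (val)", and, for a 4-byte
     integer x, is rewritten into a single __sync_fetch_and_add_4 (&x, 1)
     call.  */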
4936
4937 gsi = gsi_after_labels (store_bb);
4938 stmt = gsi_stmt (gsi);
4939 loc = gimple_location (stmt);
4940 if (!is_gimple_assign (stmt))
4941 return false;
4942 gsi_next (&gsi);
4943 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
4944 return false;
4945
4946 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
4947 return false;
4948
4949 /* Check for one of the supported fetch-op operations. */
4950 switch (gimple_assign_rhs_code (stmt))
4951 {
4952 case PLUS_EXPR:
4953 case POINTER_PLUS_EXPR:
4954 base = BUILT_IN_FETCH_AND_ADD_N;
4955 optab = sync_add_optab;
4956 break;
4957 case MINUS_EXPR:
4958 base = BUILT_IN_FETCH_AND_SUB_N;
4959 optab = sync_add_optab;
4960 break;
4961 case BIT_AND_EXPR:
4962 base = BUILT_IN_FETCH_AND_AND_N;
4963 optab = sync_and_optab;
4964 break;
4965 case BIT_IOR_EXPR:
4966 base = BUILT_IN_FETCH_AND_OR_N;
4967 optab = sync_ior_optab;
4968 break;
4969 case BIT_XOR_EXPR:
4970 base = BUILT_IN_FETCH_AND_XOR_N;
4971 optab = sync_xor_optab;
4972 break;
4973 default:
4974 return false;
4975 }
4976 /* Make sure the expression is of the proper form. */
4977 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4978 rhs = gimple_assign_rhs2 (stmt);
4979 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4980 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
4981 rhs = gimple_assign_rhs1 (stmt);
4982 else
4983 return false;
4984
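  /* INDEX is log2 of the operand size, so, assuming the conventional
     layout of the __sync builtins (the _N placeholder immediately followed
     by the _1, _2, _4, _8 and _16 variants), BASE + INDEX + 1 selects
     e.g. the ..._4 builtin for a 4-byte operand (INDEX == 2).  */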
4985 decl = built_in_decls[base + index + 1];
4986 itype = TREE_TYPE (TREE_TYPE (decl));
4987
4988 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
4989 return false;
4990
4991 gsi = gsi_last_bb (load_bb);
4992 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
4993 call = build_call_expr_loc (loc,
4994 decl, 2, addr,
4995 fold_convert_loc (loc, itype, rhs));
4996 call = fold_convert_loc (loc, void_type_node, call);
4997 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
4998 gsi_remove (&gsi, true);
4999
5000 gsi = gsi_last_bb (store_bb);
5001 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5002 gsi_remove (&gsi, true);
5003 gsi = gsi_last_bb (store_bb);
5004 gsi_remove (&gsi, true);
5005
5006 if (gimple_in_ssa_p (cfun))
5007 update_ssa (TODO_update_ssa_no_phi);
5008
5009 return true;
5010 }
5011
5012 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5013
5014 oldval = *addr;
5015 repeat:
5016 newval = rhs; // with oldval replacing *addr in rhs
5017         newval = __sync_val_compare_and_swap (addr, oldval, newval);
5018         if (newval != oldval)
5019           { oldval = newval; goto repeat; }
5020
5021 INDEX is log2 of the size of the data type, and thus usable to find the
5022 index of the builtin decl. */
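/* Illustrative note (hypothetical input): "#pragma omp atomic x *= 2.0"
   on a double has no fetch-and-op builtin, so it is expanded through this
   loop instead; the double is VIEW_CONVERT_EXPRed to a same-sized integer
   so that __sync_val_compare_and_swap_8 can perform the update.  */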
5023
5024 static bool
5025 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5026 tree addr, tree loaded_val, tree stored_val,
5027 int index)
5028 {
5029 tree loadedi, storedi, initial, new_storedi, old_vali;
5030 tree type, itype, cmpxchg, iaddr;
5031 gimple_stmt_iterator si;
5032 basic_block loop_header = single_succ (load_bb);
5033 gimple phi, stmt;
5034 edge e;
5035
5036 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
5037 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5038 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5039
5040 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
5041 return false;
5042
5043 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5044 si = gsi_last_bb (load_bb);
5045 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5046
5047 /* For floating-point values, we'll need to view-convert them to integers
5048 so that we can perform the atomic compare and swap. Simplify the
5049 following code by always setting up the "i"ntegral variables. */
5050 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5051 {
5052 tree iaddr_val;
5053
5054 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5055 true), NULL);
5056 iaddr_val
5057 = force_gimple_operand_gsi (&si,
5058 fold_convert (TREE_TYPE (iaddr), addr),
5059 false, NULL_TREE, true, GSI_SAME_STMT);
5060 stmt = gimple_build_assign (iaddr, iaddr_val);
5061 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5062 loadedi = create_tmp_var (itype, NULL);
5063 if (gimple_in_ssa_p (cfun))
5064 {
5065 add_referenced_var (iaddr);
5066 add_referenced_var (loadedi);
5067 loadedi = make_ssa_name (loadedi, NULL);
5068 }
5069 }
5070 else
5071 {
5072 iaddr = addr;
5073 loadedi = loaded_val;
5074 }
5075
5076 initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
5077 true, NULL_TREE, true, GSI_SAME_STMT);
5078
5079 /* Move the value to the LOADEDI temporary. */
5080 if (gimple_in_ssa_p (cfun))
5081 {
5082 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5083 phi = create_phi_node (loadedi, loop_header);
5084 SSA_NAME_DEF_STMT (loadedi) = phi;
5085 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5086 initial);
5087 }
5088 else
5089 gsi_insert_before (&si,
5090 gimple_build_assign (loadedi, initial),
5091 GSI_SAME_STMT);
5092 if (loadedi != loaded_val)
5093 {
5094 gimple_stmt_iterator gsi2;
5095 tree x;
5096
5097 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5098 gsi2 = gsi_start_bb (loop_header);
5099 if (gimple_in_ssa_p (cfun))
5100 {
5101 gimple stmt;
5102 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5103 true, GSI_SAME_STMT);
5104 stmt = gimple_build_assign (loaded_val, x);
5105 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5106 }
5107 else
5108 {
5109 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5110 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5111 true, GSI_SAME_STMT);
5112 }
5113 }
5114 gsi_remove (&si, true);
5115
5116 si = gsi_last_bb (store_bb);
5117 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5118
5119 if (iaddr == addr)
5120 storedi = stored_val;
5121 else
5122 storedi =
5123 force_gimple_operand_gsi (&si,
5124 build1 (VIEW_CONVERT_EXPR, itype,
5125 stored_val), true, NULL_TREE, true,
5126 GSI_SAME_STMT);
5127
5128 /* Build the compare&swap statement. */
5129 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5130 new_storedi = force_gimple_operand_gsi (&si,
5131 fold_convert (TREE_TYPE (loadedi),
5132 new_storedi),
5133 true, NULL_TREE,
5134 true, GSI_SAME_STMT);
5135
5136 if (gimple_in_ssa_p (cfun))
5137 old_vali = loadedi;
5138 else
5139 {
5140 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5141 if (gimple_in_ssa_p (cfun))
5142 add_referenced_var (old_vali);
5143 stmt = gimple_build_assign (old_vali, loadedi);
5144 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5145
5146 stmt = gimple_build_assign (loadedi, new_storedi);
5147 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5148 }
5149
5150 /* Note that we always perform the comparison as an integer, even for
5151 floating point. This allows the atomic operation to properly
5152 succeed even with NaNs and -0.0. */
5153 stmt = gimple_build_cond_empty
5154 (build2 (NE_EXPR, boolean_type_node,
5155 new_storedi, old_vali));
5156 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5157
5158 /* Update cfg. */
5159 e = single_succ_edge (store_bb);
5160 e->flags &= ~EDGE_FALLTHRU;
5161 e->flags |= EDGE_FALSE_VALUE;
5162
5163 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5164
5165 /* Copy the new value to loadedi (we already did that before the condition
5166 if we are not in SSA). */
5167 if (gimple_in_ssa_p (cfun))
5168 {
5169 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5170 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5171 }
5172
5173 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5174 gsi_remove (&si, true);
5175
5176 if (gimple_in_ssa_p (cfun))
5177 update_ssa (TODO_update_ssa_no_phi);
5178
5179 return true;
5180 }
5181
5182 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5183
5184 GOMP_atomic_start ();
5185 *addr = rhs;
5186 GOMP_atomic_end ();
5187
5188 The result is not globally atomic, but works so long as all parallel
5189 references are within #pragma omp atomic directives. According to
5190    references are within #pragma omp atomic directives.  According to
5191    responses received from omp@openmp.org, this appears to be within spec,
5192    which makes sense, since that's how several other compilers handle
5192 this situation as well.
5193 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5194 expanding. STORED_VAL is the operand of the matching
5195 GIMPLE_OMP_ATOMIC_STORE.
5196
5197 We replace
5198 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5199 loaded_val = *addr;
5200
5201 and replace
5202    GIMPLE_OMP_ATOMIC_STORE (stored_val)  with
5203 *addr = stored_val;
5204 */
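/* Hedged note on when this path is taken: expand_omp_atomic falls back
   here when the operand size is not a supported power of two, when the
   operand is not sufficiently aligned, or when the target provides no
   compare-and-swap pattern for the mode, so every such update is
   serialised through the GOMP_atomic_start/end lock in libgomp.  */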
5205
5206 static bool
5207 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5208 tree addr, tree loaded_val, tree stored_val)
5209 {
5210 gimple_stmt_iterator si;
5211 gimple stmt;
5212 tree t;
5213
5214 si = gsi_last_bb (load_bb);
5215 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5216
5217 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5218 t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
5219 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5220
5221 stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
5222 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5223 gsi_remove (&si, true);
5224
5225 si = gsi_last_bb (store_bb);
5226 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5227
5228 stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
5229 stored_val);
5230 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5231
5232 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5233 t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
5234 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5235 gsi_remove (&si, true);
5236
5237 if (gimple_in_ssa_p (cfun))
5238 update_ssa (TODO_update_ssa_no_phi);
5239 return true;
5240 }
5241
5242 /* Expand a GIMPLE_OMP_ATOMIC statement.  We try to expand
5243 using expand_omp_atomic_fetch_op. If it failed, we try to
5244 call expand_omp_atomic_pipeline, and if it fails too, the
5245 ultimate fallback is wrapping the operation in a mutex
5246 (expand_omp_atomic_mutex). REGION is the atomic region built
5247 by build_omp_regions_1(). */
5248
5249 static void
5250 expand_omp_atomic (struct omp_region *region)
5251 {
5252 basic_block load_bb = region->entry, store_bb = region->exit;
5253 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5254 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5255 tree addr = gimple_omp_atomic_load_rhs (load);
5256 tree stored_val = gimple_omp_atomic_store_val (store);
5257 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5258 HOST_WIDE_INT index;
5259
5260 /* Make sure the type is one of the supported sizes. */
5261 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5262 index = exact_log2 (index);
5263 if (index >= 0 && index <= 4)
5264 {
5265 unsigned int align = TYPE_ALIGN_UNIT (type);
5266
5267 /* __sync builtins require strict data alignment. */
5268 if (exact_log2 (align) >= index)
5269 {
5270 /* When possible, use specialized atomic update functions. */
5271 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5272 && store_bb == single_succ (load_bb))
5273 {
5274 if (expand_omp_atomic_fetch_op (load_bb, addr,
5275 loaded_val, stored_val, index))
5276 return;
5277 }
5278
5279           /* If we don't have specialized __sync builtins, try to implement it
5280 as a compare and swap loop. */
5281 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5282 loaded_val, stored_val, index))
5283 return;
5284 }
5285 }
5286
5287 /* The ultimate fallback is wrapping the operation in a mutex. */
5288 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5289 }
5290
5291
5292 /* Expand the parallel region tree rooted at REGION. Expansion
5293 proceeds in depth-first order. Innermost regions are expanded
5294 first. This way, parallel regions that require a new function to
5295 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5296 internal dependencies in their body. */
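/* Roughly speaking: for a "#pragma omp parallel" containing a
   "#pragma omp for", the inner GIMPLE_OMP_FOR region is expanded into its
   GOMP_loop_* calls first, and only then is the enclosing
   GIMPLE_OMP_PARALLEL outlined into the child function handed to the
   thread library.  */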
5297
5298 static void
5299 expand_omp (struct omp_region *region)
5300 {
5301 while (region)
5302 {
5303 location_t saved_location;
5304
5305 /* First, determine whether this is a combined parallel+workshare
5306 region. */
5307 if (region->type == GIMPLE_OMP_PARALLEL)
5308 determine_parallel_type (region);
5309
5310 if (region->inner)
5311 expand_omp (region->inner);
5312
5313 saved_location = input_location;
5314 if (gimple_has_location (last_stmt (region->entry)))
5315 input_location = gimple_location (last_stmt (region->entry));
5316
5317 switch (region->type)
5318 {
5319 case GIMPLE_OMP_PARALLEL:
5320 case GIMPLE_OMP_TASK:
5321 expand_omp_taskreg (region);
5322 break;
5323
5324 case GIMPLE_OMP_FOR:
5325 expand_omp_for (region);
5326 break;
5327
5328 case GIMPLE_OMP_SECTIONS:
5329 expand_omp_sections (region);
5330 break;
5331
5332 case GIMPLE_OMP_SECTION:
5333 /* Individual omp sections are handled together with their
5334 parent GIMPLE_OMP_SECTIONS region. */
5335 break;
5336
5337 case GIMPLE_OMP_SINGLE:
5338 expand_omp_single (region);
5339 break;
5340
5341 case GIMPLE_OMP_MASTER:
5342 case GIMPLE_OMP_ORDERED:
5343 case GIMPLE_OMP_CRITICAL:
5344 expand_omp_synch (region);
5345 break;
5346
5347 case GIMPLE_OMP_ATOMIC_LOAD:
5348 expand_omp_atomic (region);
5349 break;
5350
5351 default:
5352 gcc_unreachable ();
5353 }
5354
5355 input_location = saved_location;
5356 region = region->next;
5357 }
5358 }
5359
5360
5361 /* Helper for build_omp_regions. Scan the dominator tree starting at
5362 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5363    true, the function ends once a single tree is built (otherwise, a whole
5364 forest of OMP constructs may be built). */
5365
5366 static void
5367 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5368 bool single_tree)
5369 {
5370 gimple_stmt_iterator gsi;
5371 gimple stmt;
5372 basic_block son;
5373
5374 gsi = gsi_last_bb (bb);
5375 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5376 {
5377 struct omp_region *region;
5378 enum gimple_code code;
5379
5380 stmt = gsi_stmt (gsi);
5381 code = gimple_code (stmt);
5382 if (code == GIMPLE_OMP_RETURN)
5383 {
5384 /* STMT is the return point out of region PARENT. Mark it
5385 as the exit point and make PARENT the immediately
5386 enclosing region. */
5387 gcc_assert (parent);
5388 region = parent;
5389 region->exit = bb;
5390 parent = parent->outer;
5391 }
5392 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5393 {
5394           /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5395 GIMPLE_OMP_RETURN, but matches with
5396 GIMPLE_OMP_ATOMIC_LOAD. */
5397 gcc_assert (parent);
5398 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5399 region = parent;
5400 region->exit = bb;
5401 parent = parent->outer;
5402 }
5403
5404 else if (code == GIMPLE_OMP_CONTINUE)
5405 {
5406 gcc_assert (parent);
5407 parent->cont = bb;
5408 }
5409 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5410 {
5411 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5412 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5413 ;
5414 }
5415 else
5416 {
5417 /* Otherwise, this directive becomes the parent for a new
5418 region. */
5419 region = new_omp_region (bb, code, parent);
5420 parent = region;
5421 }
5422 }
5423
5424 if (single_tree && !parent)
5425 return;
5426
5427 for (son = first_dom_son (CDI_DOMINATORS, bb);
5428 son;
5429 son = next_dom_son (CDI_DOMINATORS, son))
5430 build_omp_regions_1 (son, parent, single_tree);
5431 }
5432
5433 /* Build the tree of OMP regions rooted at ROOT, storing it in
5434 root_omp_region. */
5435
5436 static void
5437 build_omp_regions_root (basic_block root)
5438 {
5439 gcc_assert (root_omp_region == NULL);
5440 build_omp_regions_1 (root, NULL, true);
5441 gcc_assert (root_omp_region != NULL);
5442 }
5443
5444 /* Expands the OMP construct (and its subconstructs) starting in HEAD. */
5445
5446 void
5447 omp_expand_local (basic_block head)
5448 {
5449 build_omp_regions_root (head);
5450 if (dump_file && (dump_flags & TDF_DETAILS))
5451 {
5452 fprintf (dump_file, "\nOMP region tree\n\n");
5453 dump_omp_region (dump_file, root_omp_region, 0);
5454 fprintf (dump_file, "\n");
5455 }
5456
5457 remove_exit_barriers (root_omp_region);
5458 expand_omp (root_omp_region);
5459
5460 free_omp_regions ();
5461 }
5462
5463 /* Scan the CFG and build a tree of OMP regions, storing it in
5464 root_omp_region. */
5465
5466 static void
5467 build_omp_regions (void)
5468 {
5469 gcc_assert (root_omp_region == NULL);
5470 calculate_dominance_info (CDI_DOMINATORS);
5471 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5472 }
5473
5474 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5475
5476 static unsigned int
5477 execute_expand_omp (void)
5478 {
5479 build_omp_regions ();
5480
5481 if (!root_omp_region)
5482 return 0;
5483
5484 if (dump_file)
5485 {
5486 fprintf (dump_file, "\nOMP region tree\n\n");
5487 dump_omp_region (dump_file, root_omp_region, 0);
5488 fprintf (dump_file, "\n");
5489 }
5490
5491 remove_exit_barriers (root_omp_region);
5492
5493 expand_omp (root_omp_region);
5494
5495 cleanup_tree_cfg ();
5496
5497 free_omp_regions ();
5498
5499 return 0;
5500 }
5501
5502 /* OMP expansion -- the default pass, run before creation of SSA form. */
5503
5504 static bool
5505 gate_expand_omp (void)
5506 {
5507 return (flag_openmp != 0 && errorcount == 0);
5508 }
5509
5510 struct gimple_opt_pass pass_expand_omp =
5511 {
5512 {
5513 GIMPLE_PASS,
5514 "ompexp", /* name */
5515 gate_expand_omp, /* gate */
5516 execute_expand_omp, /* execute */
5517 NULL, /* sub */
5518 NULL, /* next */
5519 0, /* static_pass_number */
5520 TV_NONE, /* tv_id */
5521 PROP_gimple_any, /* properties_required */
5522 0, /* properties_provided */
5523 0, /* properties_destroyed */
5524 0, /* todo_flags_start */
5525 TODO_dump_func /* todo_flags_finish */
5526 }
5527 };
5528 \f
5529 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5530
5531 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5532 CTX is the enclosing OMP context for the current statement. */
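/* Editor's note -- an illustrative sketch, not part of the original
   sources.  A user-level construct such as

       #pragma omp sections nowait
       {
       #pragma omp section
         foo ();
       #pragma omp section
         bar ();
       }

   is rebuilt below as a GIMPLE_BIND containing, roughly in order: the
   lowered input clauses, the GIMPLE_OMP_SECTIONS statement with its
   ".section" control variable, a GIMPLE_OMP_SECTIONS_SWITCH, a nested
   bind holding the individual section bodies (each followed by a
   GIMPLE_OMP_RETURN), a GIMPLE_OMP_CONTINUE, any reduction and
   destructor code, and a final GIMPLE_OMP_RETURN whose flag reflects
   the presence of OMP_CLAUSE_NOWAIT.  ("foo" and "bar" are
   placeholders for this illustration.)  */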
5533
5534 static void
5535 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5536 {
5537 tree block, control;
5538 gimple_stmt_iterator tgsi;
5539 unsigned i, len;
5540 gimple stmt, new_stmt, bind, t;
5541 gimple_seq ilist, dlist, olist, new_body, body;
5542 struct gimplify_ctx gctx;
5543
5544 stmt = gsi_stmt (*gsi_p);
5545
5546 push_gimplify_context (&gctx);
5547
5548 dlist = NULL;
5549 ilist = NULL;
5550 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5551 &ilist, &dlist, ctx);
5552
5553 tgsi = gsi_start (gimple_omp_body (stmt));
5554 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5555 continue;
5556
5557 tgsi = gsi_start (gimple_omp_body (stmt));
5558 body = NULL;
5559 for (i = 0; i < len; i++, gsi_next (&tgsi))
5560 {
5561 omp_context *sctx;
5562 gimple sec_start;
5563
5564 sec_start = gsi_stmt (tgsi);
5565 sctx = maybe_lookup_ctx (sec_start);
5566 gcc_assert (sctx);
5567
5568 gimple_seq_add_stmt (&body, sec_start);
5569
5570 lower_omp (gimple_omp_body (sec_start), sctx);
5571 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5572 gimple_omp_set_body (sec_start, NULL);
5573
5574 if (i == len - 1)
5575 {
5576 gimple_seq l = NULL;
5577 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5578 &l, ctx);
5579 gimple_seq_add_seq (&body, l);
5580 gimple_omp_section_set_last (sec_start);
5581 }
5582
5583 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5584 }
5585
5586 block = make_node (BLOCK);
5587 bind = gimple_build_bind (NULL, body, block);
5588
5589 olist = NULL;
5590 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5591
5592 block = make_node (BLOCK);
5593 new_stmt = gimple_build_bind (NULL, NULL, block);
5594
5595 pop_gimplify_context (new_stmt);
5596 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5597 BLOCK_VARS (block) = gimple_bind_vars (bind);
5598 if (BLOCK_VARS (block))
5599 TREE_USED (block) = 1;
5600
5601 new_body = NULL;
5602 gimple_seq_add_seq (&new_body, ilist);
5603 gimple_seq_add_stmt (&new_body, stmt);
5604 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5605 gimple_seq_add_stmt (&new_body, bind);
5606
5607 control = create_tmp_var (unsigned_type_node, ".section");
5608 t = gimple_build_omp_continue (control, control);
5609 gimple_omp_sections_set_control (stmt, control);
5610 gimple_seq_add_stmt (&new_body, t);
5611
5612 gimple_seq_add_seq (&new_body, olist);
5613 gimple_seq_add_seq (&new_body, dlist);
5614
5615 new_body = maybe_catch_exception (new_body);
5616
5617 t = gimple_build_omp_return
5618 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5619 OMP_CLAUSE_NOWAIT));
5620 gimple_seq_add_stmt (&new_body, t);
5621
5622 gimple_bind_set_body (new_stmt, new_body);
5623 gimple_omp_set_body (stmt, NULL);
5624
5625 gsi_replace (gsi_p, new_stmt, true);
5626 }
5627
5628
5629 /* A subroutine of lower_omp_single. Expand the simple form of
5630 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5631
5632 if (GOMP_single_start ())
5633 BODY;
5634 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5635
5636 FIXME. It may be better to delay expanding the logic of this until
5637 pass_expand_omp. The expanded logic may make the job more difficult
5638 for a synchronization analysis pass. */
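/* Editor's note -- an assumed user-level example of the pattern above:

       #pragma omp parallel
       {
       #pragma omp single
         x = init ();
       }

   Only the thread for which GOMP_single_start () returns true executes
   "x = init ();"; without a 'nowait' clause the other threads wait at
   the barrier emitted after the construct.  ("x" and "init" are
   placeholders for this illustration.)  */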
5639
5640 static void
5641 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5642 {
5643 location_t loc = gimple_location (single_stmt);
5644 tree tlabel = create_artificial_label (loc);
5645 tree flabel = create_artificial_label (loc);
5646 gimple call, cond;
5647 tree lhs, decl;
5648
5649 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5650 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5651 call = gimple_build_call (decl, 0);
5652 gimple_call_set_lhs (call, lhs);
5653 gimple_seq_add_stmt (pre_p, call);
5654
5655 cond = gimple_build_cond (EQ_EXPR, lhs,
5656 fold_convert_loc (loc, TREE_TYPE (lhs),
5657 boolean_true_node),
5658 tlabel, flabel);
5659 gimple_seq_add_stmt (pre_p, cond);
5660 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5661 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5662 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5663 }
5664
5665
5666 /* A subroutine of lower_omp_single. Expand the copyprivate form of
5667 a GIMPLE_OMP_SINGLE, i.e. one with a copyprivate clause:
5668
5669 #pragma omp single copyprivate (a, b, c)
5670
5671 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5672
5673 {
5674 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5675 {
5676 BODY;
5677 copyout.a = a;
5678 copyout.b = b;
5679 copyout.c = c;
5680 GOMP_single_copy_end (&copyout);
5681 }
5682 else
5683 {
5684 a = copyout_p->a;
5685 b = copyout_p->b;
5686 c = copyout_p->c;
5687 }
5688 GOMP_barrier ();
5689 }
5690
5691 FIXME. It may be better to delay expanding the logic of this until
5692 pass_expand_omp. The expanded logic may make the job more difficult
5693 for a synchronization analysis pass. */
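/* Editor's note -- an assumed example of the copyprivate form: it is
   typically used to broadcast a value produced by one thread to the
   private copies held by all the others, e.g.

       int n;
       #pragma omp parallel private (n)
       {
       #pragma omp single copyprivate (n)
         n = read_input ();
       }

   After the construct every thread observes the value of "n" assigned
   inside the single region, via the GOMP_single_copy_start and
   GOMP_single_copy_end protocol sketched above.  ("read_input" is a
   placeholder.)  */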
5694
5695 static void
5696 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5697 {
5698 tree ptr_type, t, l0, l1, l2;
5699 gimple_seq copyin_seq;
5700 location_t loc = gimple_location (single_stmt);
5701
5702 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5703
5704 ptr_type = build_pointer_type (ctx->record_type);
5705 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5706
5707 l0 = create_artificial_label (loc);
5708 l1 = create_artificial_label (loc);
5709 l2 = create_artificial_label (loc);
5710
5711 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5712 t = fold_convert_loc (loc, ptr_type, t);
5713 gimplify_assign (ctx->receiver_decl, t, pre_p);
5714
5715 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5716 build_int_cst (ptr_type, 0));
5717 t = build3 (COND_EXPR, void_type_node, t,
5718 build_and_jump (&l0), build_and_jump (&l1));
5719 gimplify_and_add (t, pre_p);
5720
5721 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5722
5723 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5724
5725 copyin_seq = NULL;
5726 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5727 &copyin_seq, ctx);
5728
5729 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5730 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
5731 1, t);
5732 gimplify_and_add (t, pre_p);
5733
5734 t = build_and_jump (&l2);
5735 gimplify_and_add (t, pre_p);
5736
5737 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5738
5739 gimple_seq_add_seq (pre_p, copyin_seq);
5740
5741 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5742 }
5743
5744
5745 /* Lower code for an OpenMP single directive. */
5746
5747 static void
5748 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5749 {
5750 tree block;
5751 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5752 gimple_seq bind_body, dlist = NULL;
5753 struct gimplify_ctx gctx;
5754
5755 push_gimplify_context (&gctx);
5756
5757 bind_body = NULL;
5758 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5759 &bind_body, &dlist, ctx);
5760 lower_omp (gimple_omp_body (single_stmt), ctx);
5761
5762 gimple_seq_add_stmt (&bind_body, single_stmt);
5763
5764 if (ctx->record_type)
5765 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5766 else
5767 lower_omp_single_simple (single_stmt, &bind_body);
5768
5769 gimple_omp_set_body (single_stmt, NULL);
5770
5771 gimple_seq_add_seq (&bind_body, dlist);
5772
5773 bind_body = maybe_catch_exception (bind_body);
5774
5775 t = gimple_build_omp_return
5776 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5777 OMP_CLAUSE_NOWAIT));
5778 gimple_seq_add_stmt (&bind_body, t);
5779
5780 block = make_node (BLOCK);
5781 bind = gimple_build_bind (NULL, bind_body, block);
5782
5783 pop_gimplify_context (bind);
5784
5785 gimple_bind_append_vars (bind, ctx->block_vars);
5786 BLOCK_VARS (block) = ctx->block_vars;
5787 gsi_replace (gsi_p, bind, true);
5788 if (BLOCK_VARS (block))
5789 TREE_USED (block) = 1;
5790 }
5791
5792
5793 /* Lower code for an OpenMP master directive. */
5794
5795 static void
5796 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5797 {
5798 tree block, lab = NULL, x;
5799 gimple stmt = gsi_stmt (*gsi_p), bind;
5800 location_t loc = gimple_location (stmt);
5801 gimple_seq tseq;
5802 struct gimplify_ctx gctx;
5803
5804 push_gimplify_context (&gctx);
5805
5806 block = make_node (BLOCK);
5807 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5808 block);
5809
5810 x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5811 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5812 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5813 tseq = NULL;
5814 gimplify_and_add (x, &tseq);
5815 gimple_bind_add_seq (bind, tseq);
5816
5817 lower_omp (gimple_omp_body (stmt), ctx);
5818 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5819 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5820 gimple_omp_set_body (stmt, NULL);
5821
5822 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5823
5824 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5825
5826 pop_gimplify_context (bind);
5827
5828 gimple_bind_append_vars (bind, ctx->block_vars);
5829 BLOCK_VARS (block) = ctx->block_vars;
5830 gsi_replace (gsi_p, bind, true);
5831 }
5832
5833
5834 /* Lower code for an OpenMP ordered directive. */
5835
5836 static void
5837 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5838 {
5839 tree block;
5840 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5841 struct gimplify_ctx gctx;
5842
5843 push_gimplify_context (&gctx);
5844
5845 block = make_node (BLOCK);
5846 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5847 block);
5848
5849 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5850 gimple_bind_add_stmt (bind, x);
5851
5852 lower_omp (gimple_omp_body (stmt), ctx);
5853 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5854 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5855 gimple_omp_set_body (stmt, NULL);
5856
5857 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5858 gimple_bind_add_stmt (bind, x);
5859
5860 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5861
5862 pop_gimplify_context (bind);
5863
5864 gimple_bind_append_vars (bind, ctx->block_vars);
5865 BLOCK_VARS (block) = gimple_bind_vars (bind);
5866 gsi_replace (gsi_p, bind, true);
5867 }
5868
5869
5870 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
5871 substitution of a couple of function calls. But in the NAMED case, it
5872 requires that the languages coordinate on a symbol name. It is therefore
5873 best put here in common code. */
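/* Editor's note -- an assumed example of the NAMED case:

       #pragma omp critical (xlock)
         x++;

   The code below creates (or reuses) a common symbol named
   ".gomp_critical_user_xlock" and brackets the body with calls to
   GOMP_critical_name_start and GOMP_critical_name_end taking the
   address of that symbol; an unnamed critical uses GOMP_critical_start
   and GOMP_critical_end instead.  */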
5874
5875 static GTY((param1_is (tree), param2_is (tree)))
5876 splay_tree critical_name_mutexes;
5877
5878 static void
5879 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5880 {
5881 tree block;
5882 tree name, lock, unlock;
5883 gimple stmt = gsi_stmt (*gsi_p), bind;
5884 location_t loc = gimple_location (stmt);
5885 gimple_seq tbody;
5886 struct gimplify_ctx gctx;
5887
5888 name = gimple_omp_critical_name (stmt);
5889 if (name)
5890 {
5891 tree decl;
5892 splay_tree_node n;
5893
5894 if (!critical_name_mutexes)
5895 critical_name_mutexes
5896 = splay_tree_new_ggc (splay_tree_compare_pointers);
5897
5898 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5899 if (n == NULL)
5900 {
5901 char *new_str;
5902
5903 decl = create_tmp_var_raw (ptr_type_node, NULL);
5904
5905 new_str = ACONCAT ((".gomp_critical_user_",
5906 IDENTIFIER_POINTER (name), NULL));
5907 DECL_NAME (decl) = get_identifier (new_str);
5908 TREE_PUBLIC (decl) = 1;
5909 TREE_STATIC (decl) = 1;
5910 DECL_COMMON (decl) = 1;
5911 DECL_ARTIFICIAL (decl) = 1;
5912 DECL_IGNORED_P (decl) = 1;
5913 varpool_finalize_decl (decl);
5914
5915 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5916 (splay_tree_value) decl);
5917 }
5918 else
5919 decl = (tree) n->value;
5920
5921 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5922 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
5923
5924 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5925 unlock = build_call_expr_loc (loc, unlock, 1,
5926 build_fold_addr_expr_loc (loc, decl));
5927 }
5928 else
5929 {
5930 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5931 lock = build_call_expr_loc (loc, lock, 0);
5932
5933 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5934 unlock = build_call_expr_loc (loc, unlock, 0);
5935 }
5936
5937 push_gimplify_context (&gctx);
5938
5939 block = make_node (BLOCK);
5940 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5941
5942 tbody = gimple_bind_body (bind);
5943 gimplify_and_add (lock, &tbody);
5944 gimple_bind_set_body (bind, tbody);
5945
5946 lower_omp (gimple_omp_body (stmt), ctx);
5947 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5948 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5949 gimple_omp_set_body (stmt, NULL);
5950
5951 tbody = gimple_bind_body (bind);
5952 gimplify_and_add (unlock, &tbody);
5953 gimple_bind_set_body (bind, tbody);
5954
5955 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5956
5957 pop_gimplify_context (bind);
5958 gimple_bind_append_vars (bind, ctx->block_vars);
5959 BLOCK_VARS (block) = gimple_bind_vars (bind);
5960 gsi_replace (gsi_p, bind, true);
5961 }
5962
5963
5964 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5965 for a lastprivate clause. Given a loop control predicate of (V
5966 cond N2), we gate the clause on (!(V cond N2)). The lowered form
5967 is appended to *DLIST, and the iterator initialization is appended to
5968 *BODY_P. */
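/* Editor's note -- an assumed example.  For

       #pragma omp for lastprivate (x)
       for (i = 0; i < n; i++)
         x = f (i);

   the loop condition is LT_EXPR, so the gate built below is (i >= n):
   only a thread whose final iterator value reached the end of the
   iteration space copies its private "x" back out.  When the step is
   +1 or -1 this is strength-reduced to (i == n) as described in the
   function body.  ("f" is a placeholder.)  */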
5969
5970 static void
5971 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5972 gimple_seq *dlist, struct omp_context *ctx)
5973 {
5974 tree clauses, cond, vinit;
5975 enum tree_code cond_code;
5976 gimple_seq stmts;
5977
5978 cond_code = fd->loop.cond_code;
5979 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5980
5981 /* When possible, use a strict equality expression. This can let
5982 VRP-type optimizations deduce the value and remove a copy. */
5983 if (host_integerp (fd->loop.step, 0))
5984 {
5985 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
5986 if (step == 1 || step == -1)
5987 cond_code = EQ_EXPR;
5988 }
5989
5990 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
5991
5992 clauses = gimple_omp_for_clauses (fd->for_stmt);
5993 stmts = NULL;
5994 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
5995 if (!gimple_seq_empty_p (stmts))
5996 {
5997 gimple_seq_add_seq (&stmts, *dlist);
5998 *dlist = stmts;
5999
6000 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6001 vinit = fd->loop.n1;
6002 if (cond_code == EQ_EXPR
6003 && host_integerp (fd->loop.n2, 0)
6004 && ! integer_zerop (fd->loop.n2))
6005 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6006
6007 /* Initialize the iterator variable, so that threads that don't execute
6008 any iterations don't execute the lastprivate clauses by accident. */
6009 gimplify_assign (fd->loop.v, vinit, body_p);
6010 }
6011 }
6012
6013
6014 /* Lower code for an OpenMP loop directive. */
6015
6016 static void
6017 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6018 {
6019 tree *rhs_p, block;
6020 struct omp_for_data fd;
6021 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6022 gimple_seq omp_for_body, body, dlist;
6023 size_t i;
6024 struct gimplify_ctx gctx;
6025
6026 push_gimplify_context (&gctx);
6027
6028 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6029 lower_omp (gimple_omp_body (stmt), ctx);
6030
6031 block = make_node (BLOCK);
6032 new_stmt = gimple_build_bind (NULL, NULL, block);
6033
6034 /* Move the declarations of temporaries in the loop body before we make
6035 it go away. */
6036 omp_for_body = gimple_omp_body (stmt);
6037 if (!gimple_seq_empty_p (omp_for_body)
6038 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6039 {
6040 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6041 gimple_bind_append_vars (new_stmt, vars);
6042 }
6043
6044 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6045 dlist = NULL;
6046 body = NULL;
6047 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6048 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6049
6050 /* Lower the header expressions. At this point, we can assume that
6051 the header is of the form:
6052
6053 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6054
6055 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6056 using the .omp_data_s mapping, if needed. */
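/* Editor's note -- an assumed example: for a header such as
   "for (i = 0; i < n * 4; i += s)" the bound "n * 4" is not a GIMPLE
   invariant, so the loop below replaces it with a formal temporary
   computed in BODY, giving roughly "tmp = n * 4; ... i < tmp ...".
   ("tmp" is a placeholder name.)  */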
6057 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6058 {
6059 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6060 if (!is_gimple_min_invariant (*rhs_p))
6061 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6062
6063 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6064 if (!is_gimple_min_invariant (*rhs_p))
6065 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6066
6067 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6068 if (!is_gimple_min_invariant (*rhs_p))
6069 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6070 }
6071
6072 /* Once lowered, extract the bounds and clauses. */
6073 extract_omp_for_data (stmt, &fd, NULL);
6074
6075 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6076
6077 gimple_seq_add_stmt (&body, stmt);
6078 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6079
6080 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6081 fd.loop.v));
6082
6083 /* After the loop, add exit clauses. */
6084 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6085 gimple_seq_add_seq (&body, dlist);
6086
6087 body = maybe_catch_exception (body);
6088
6089 /* Region exit marker goes at the end of the loop body. */
6090 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6091
6092 pop_gimplify_context (new_stmt);
6093
6094 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6095 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6096 if (BLOCK_VARS (block))
6097 TREE_USED (block) = 1;
6098
6099 gimple_bind_set_body (new_stmt, body);
6100 gimple_omp_set_body (stmt, NULL);
6101 gimple_omp_for_set_pre_body (stmt, NULL);
6102 gsi_replace (gsi_p, new_stmt, true);
6103 }
6104
6105 /* Callback for walk_stmts. Check if the current statement only contains
6106 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
6107
6108 static tree
6109 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6110 bool *handled_ops_p,
6111 struct walk_stmt_info *wi)
6112 {
6113 int *info = (int *) wi->info;
6114 gimple stmt = gsi_stmt (*gsi_p);
6115
6116 *handled_ops_p = true;
6117 switch (gimple_code (stmt))
6118 {
6119 WALK_SUBSTMTS;
6120
6121 case GIMPLE_OMP_FOR:
6122 case GIMPLE_OMP_SECTIONS:
6123 *info = *info == 0 ? 1 : -1;
6124 break;
6125 default:
6126 *info = -1;
6127 break;
6128 }
6129 return NULL;
6130 }
6131
6132 struct omp_taskcopy_context
6133 {
6134 /* This field must be at the beginning, as we do "inheritance": Some
6135 callback functions for tree-inline.c (e.g., task_copyfn_copy_decl)
6136 receive a copy_body_data pointer that is up-casted to an
6137 omp_taskcopy_context pointer. */
6138 copy_body_data cb;
6139 omp_context *ctx;
6140 };
6141
6142 static tree
6143 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6144 {
6145 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6146
6147 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6148 return create_tmp_var (TREE_TYPE (var), NULL);
6149
6150 return var;
6151 }
6152
6153 static tree
6154 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6155 {
6156 tree name, new_fields = NULL, type, f;
6157
6158 type = lang_hooks.types.make_type (RECORD_TYPE);
6159 name = DECL_NAME (TYPE_NAME (orig_type));
6160 name = build_decl (gimple_location (tcctx->ctx->stmt),
6161 TYPE_DECL, name, type);
6162 TYPE_NAME (type) = name;
6163
6164 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6165 {
6166 tree new_f = copy_node (f);
6167 DECL_CONTEXT (new_f) = type;
6168 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6169 TREE_CHAIN (new_f) = new_fields;
6170 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6171 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6172 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6173 &tcctx->cb, NULL);
6174 new_fields = new_f;
6175 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6176 }
6177 TYPE_FIELDS (type) = nreverse (new_fields);
6178 layout_type (type);
6179 return type;
6180 }
6181
6182 /* Create task copyfn. */
6183
6184 static void
6185 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6186 {
6187 struct function *child_cfun;
6188 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6189 tree record_type, srecord_type, bind, list;
6190 bool record_needs_remap = false, srecord_needs_remap = false;
6191 splay_tree_node n;
6192 struct omp_taskcopy_context tcctx;
6193 struct gimplify_ctx gctx;
6194 location_t loc = gimple_location (task_stmt);
6195
6196 child_fn = gimple_omp_task_copy_fn (task_stmt);
6197 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6198 gcc_assert (child_cfun->cfg == NULL);
6199 child_cfun->dont_save_pending_sizes_p = 1;
6200 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6201
6202 /* Reset DECL_CONTEXT on function arguments. */
6203 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6204 DECL_CONTEXT (t) = child_fn;
6205
6206 /* Populate the function. */
6207 push_gimplify_context (&gctx);
6208 current_function_decl = child_fn;
6209
6210 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6211 TREE_SIDE_EFFECTS (bind) = 1;
6212 list = NULL;
6213 DECL_SAVED_TREE (child_fn) = bind;
6214 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6215
6216 /* Remap src and dst argument types if needed. */
6217 record_type = ctx->record_type;
6218 srecord_type = ctx->srecord_type;
6219 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6220 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6221 {
6222 record_needs_remap = true;
6223 break;
6224 }
6225 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6226 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6227 {
6228 srecord_needs_remap = true;
6229 break;
6230 }
6231
6232 if (record_needs_remap || srecord_needs_remap)
6233 {
6234 memset (&tcctx, '\0', sizeof (tcctx));
6235 tcctx.cb.src_fn = ctx->cb.src_fn;
6236 tcctx.cb.dst_fn = child_fn;
6237 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6238 tcctx.cb.dst_node = tcctx.cb.src_node;
6239 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6240 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6241 tcctx.cb.eh_lp_nr = 0;
6242 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6243 tcctx.cb.decl_map = pointer_map_create ();
6244 tcctx.ctx = ctx;
6245
6246 if (record_needs_remap)
6247 record_type = task_copyfn_remap_type (&tcctx, record_type);
6248 if (srecord_needs_remap)
6249 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6250 }
6251 else
6252 tcctx.cb.decl_map = NULL;
6253
6254 push_cfun (child_cfun);
6255
6256 arg = DECL_ARGUMENTS (child_fn);
6257 TREE_TYPE (arg) = build_pointer_type (record_type);
6258 sarg = TREE_CHAIN (arg);
6259 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6260
6261 /* First pass: initialize temporaries used in record_type and srecord_type
6262 sizes and field offsets. */
6263 if (tcctx.cb.decl_map)
6264 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6265 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6266 {
6267 tree *p;
6268
6269 decl = OMP_CLAUSE_DECL (c);
6270 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6271 if (p == NULL)
6272 continue;
6273 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6274 sf = (tree) n->value;
6275 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6276 src = build_fold_indirect_ref_loc (loc, sarg);
6277 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6278 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6279 append_to_statement_list (t, &list);
6280 }
6281
6282 /* Second pass: copy shared var pointers and copy construct non-VLA
6283 firstprivate vars. */
6284 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6285 switch (OMP_CLAUSE_CODE (c))
6286 {
6287 case OMP_CLAUSE_SHARED:
6288 decl = OMP_CLAUSE_DECL (c);
6289 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6290 if (n == NULL)
6291 break;
6292 f = (tree) n->value;
6293 if (tcctx.cb.decl_map)
6294 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6295 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6296 sf = (tree) n->value;
6297 if (tcctx.cb.decl_map)
6298 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6299 src = build_fold_indirect_ref_loc (loc, sarg);
6300 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6301 dst = build_fold_indirect_ref_loc (loc, arg);
6302 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6303 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6304 append_to_statement_list (t, &list);
6305 break;
6306 case OMP_CLAUSE_FIRSTPRIVATE:
6307 decl = OMP_CLAUSE_DECL (c);
6308 if (is_variable_sized (decl))
6309 break;
6310 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6311 if (n == NULL)
6312 break;
6313 f = (tree) n->value;
6314 if (tcctx.cb.decl_map)
6315 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6316 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6317 if (n != NULL)
6318 {
6319 sf = (tree) n->value;
6320 if (tcctx.cb.decl_map)
6321 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6322 src = build_fold_indirect_ref_loc (loc, sarg);
6323 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6324 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6325 src = build_fold_indirect_ref_loc (loc, src);
6326 }
6327 else
6328 src = decl;
6329 dst = build_fold_indirect_ref_loc (loc, arg);
6330 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6331 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6332 append_to_statement_list (t, &list);
6333 break;
6334 case OMP_CLAUSE_PRIVATE:
6335 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6336 break;
6337 decl = OMP_CLAUSE_DECL (c);
6338 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6339 f = (tree) n->value;
6340 if (tcctx.cb.decl_map)
6341 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6342 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6343 if (n != NULL)
6344 {
6345 sf = (tree) n->value;
6346 if (tcctx.cb.decl_map)
6347 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6348 src = build_fold_indirect_ref_loc (loc, sarg);
6349 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6350 if (use_pointer_for_field (decl, NULL))
6351 src = build_fold_indirect_ref_loc (loc, src);
6352 }
6353 else
6354 src = decl;
6355 dst = build_fold_indirect_ref_loc (loc, arg);
6356 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6357 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6358 append_to_statement_list (t, &list);
6359 break;
6360 default:
6361 break;
6362 }
6363
6364 /* Last pass: handle VLA firstprivates. */
6365 if (tcctx.cb.decl_map)
6366 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6367 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6368 {
6369 tree ind, ptr, df;
6370
6371 decl = OMP_CLAUSE_DECL (c);
6372 if (!is_variable_sized (decl))
6373 continue;
6374 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6375 if (n == NULL)
6376 continue;
6377 f = (tree) n->value;
6378 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6379 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6380 ind = DECL_VALUE_EXPR (decl);
6381 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6382 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6383 n = splay_tree_lookup (ctx->sfield_map,
6384 (splay_tree_key) TREE_OPERAND (ind, 0));
6385 sf = (tree) n->value;
6386 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6387 src = build_fold_indirect_ref_loc (loc, sarg);
6388 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6389 src = build_fold_indirect_ref_loc (loc, src);
6390 dst = build_fold_indirect_ref_loc (loc, arg);
6391 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6392 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6393 append_to_statement_list (t, &list);
6394 n = splay_tree_lookup (ctx->field_map,
6395 (splay_tree_key) TREE_OPERAND (ind, 0));
6396 df = (tree) n->value;
6397 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6398 ptr = build_fold_indirect_ref_loc (loc, arg);
6399 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6400 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6401 build_fold_addr_expr_loc (loc, dst));
6402 append_to_statement_list (t, &list);
6403 }
6404
6405 t = build1 (RETURN_EXPR, void_type_node, NULL);
6406 append_to_statement_list (t, &list);
6407
6408 if (tcctx.cb.decl_map)
6409 pointer_map_destroy (tcctx.cb.decl_map);
6410 pop_gimplify_context (NULL);
6411 BIND_EXPR_BODY (bind) = list;
6412 pop_cfun ();
6413 current_function_decl = ctx->cb.src_fn;
6414 }
6415
6416 /* Lower the OpenMP parallel or task directive in the current statement
6417 in GSI_P. CTX holds context information for the directive. */
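/* Editor's note -- an assumed example of combined-parallel detection:

       #pragma omp parallel
       #pragma omp for
       for (i = 0; i < n; i++)
         body (i);

   The walk below finds exactly one workshare construct in the parallel
   body (ws_num == 1), so the parallel is marked combined and is later
   expanded as a combined parallel+workshare region.  ("body" is a
   placeholder.)  */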
6418
6419 static void
6420 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6421 {
6422 tree clauses;
6423 tree child_fn, t;
6424 gimple stmt = gsi_stmt (*gsi_p);
6425 gimple par_bind, bind;
6426 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6427 struct gimplify_ctx gctx;
6428 location_t loc = gimple_location (stmt);
6429
6430 clauses = gimple_omp_taskreg_clauses (stmt);
6431 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6432 par_body = gimple_bind_body (par_bind);
6433 child_fn = ctx->cb.dst_fn;
6434 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6435 && !gimple_omp_parallel_combined_p (stmt))
6436 {
6437 struct walk_stmt_info wi;
6438 int ws_num = 0;
6439
6440 memset (&wi, 0, sizeof (wi));
6441 wi.info = &ws_num;
6442 wi.val_only = true;
6443 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6444 if (ws_num == 1)
6445 gimple_omp_parallel_set_combined_p (stmt, true);
6446 }
6447 if (ctx->srecord_type)
6448 create_task_copyfn (stmt, ctx);
6449
6450 push_gimplify_context (&gctx);
6451
6452 par_olist = NULL;
6453 par_ilist = NULL;
6454 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6455 lower_omp (par_body, ctx);
6456 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6457 lower_reduction_clauses (clauses, &par_olist, ctx);
6458
6459 /* Declare all the variables created by mapping and the variables
6460 declared in the scope of the parallel body. */
6461 record_vars_into (ctx->block_vars, child_fn);
6462 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6463
6464 if (ctx->record_type)
6465 {
6466 ctx->sender_decl
6467 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6468 : ctx->record_type, ".omp_data_o");
6469 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6470 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6471 }
6472
6473 olist = NULL;
6474 ilist = NULL;
6475 lower_send_clauses (clauses, &ilist, &olist, ctx);
6476 lower_send_shared_vars (&ilist, &olist, ctx);
6477
6478 /* Once all the expansions are done, sequence all the different
6479 fragments inside gimple_omp_body. */
6480
6481 new_body = NULL;
6482
6483 if (ctx->record_type)
6484 {
6485 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6486 /* fixup_child_record_type might have changed receiver_decl's type. */
6487 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
6488 gimple_seq_add_stmt (&new_body,
6489 gimple_build_assign (ctx->receiver_decl, t));
6490 }
6491
6492 gimple_seq_add_seq (&new_body, par_ilist);
6493 gimple_seq_add_seq (&new_body, par_body);
6494 gimple_seq_add_seq (&new_body, par_olist);
6495 new_body = maybe_catch_exception (new_body);
6496 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6497 gimple_omp_set_body (stmt, new_body);
6498
6499 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6500 gimple_bind_add_stmt (bind, stmt);
6501 if (ilist || olist)
6502 {
6503 gimple_seq_add_stmt (&ilist, bind);
6504 gimple_seq_add_seq (&ilist, olist);
6505 bind = gimple_build_bind (NULL, ilist, NULL);
6506 }
6507
6508 gsi_replace (gsi_p, bind, true);
6509
6510 pop_gimplify_context (NULL);
6511 }
6512
6513 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6514 regimplified. If DATA is non-NULL, lower_omp_1 is outside
6515 of an OpenMP context, but with task_shared_vars set. */
6516
6517 static tree
6518 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6519 void *data)
6520 {
6521 tree t = *tp;
6522
6523 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6524 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6525 return t;
6526
6527 if (task_shared_vars
6528 && DECL_P (t)
6529 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6530 return t;
6531
6532 /* If a global variable has been privatized, TREE_CONSTANT on
6533 ADDR_EXPR might be wrong. */
6534 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6535 recompute_tree_invariant_for_addr_expr (t);
6536
6537 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6538 return NULL_TREE;
6539 }
6540
6541 static void
6542 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6543 {
6544 gimple stmt = gsi_stmt (*gsi_p);
6545 struct walk_stmt_info wi;
6546
6547 if (gimple_has_location (stmt))
6548 input_location = gimple_location (stmt);
6549
6550 if (task_shared_vars)
6551 memset (&wi, '\0', sizeof (wi));
6552
6553 /* If we have issued syntax errors, avoid doing any heavy lifting.
6554 Just replace the OpenMP directives with a NOP to avoid
6555 confusing RTL expansion. */
6556 if (errorcount && is_gimple_omp (stmt))
6557 {
6558 gsi_replace (gsi_p, gimple_build_nop (), true);
6559 return;
6560 }
6561
6562 switch (gimple_code (stmt))
6563 {
6564 case GIMPLE_COND:
6565 if ((ctx || task_shared_vars)
6566 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6567 ctx ? NULL : &wi, NULL)
6568 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6569 ctx ? NULL : &wi, NULL)))
6570 gimple_regimplify_operands (stmt, gsi_p);
6571 break;
6572 case GIMPLE_CATCH:
6573 lower_omp (gimple_catch_handler (stmt), ctx);
6574 break;
6575 case GIMPLE_EH_FILTER:
6576 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6577 break;
6578 case GIMPLE_TRY:
6579 lower_omp (gimple_try_eval (stmt), ctx);
6580 lower_omp (gimple_try_cleanup (stmt), ctx);
6581 break;
6582 case GIMPLE_BIND:
6583 lower_omp (gimple_bind_body (stmt), ctx);
6584 break;
6585 case GIMPLE_OMP_PARALLEL:
6586 case GIMPLE_OMP_TASK:
6587 ctx = maybe_lookup_ctx (stmt);
6588 lower_omp_taskreg (gsi_p, ctx);
6589 break;
6590 case GIMPLE_OMP_FOR:
6591 ctx = maybe_lookup_ctx (stmt);
6592 gcc_assert (ctx);
6593 lower_omp_for (gsi_p, ctx);
6594 break;
6595 case GIMPLE_OMP_SECTIONS:
6596 ctx = maybe_lookup_ctx (stmt);
6597 gcc_assert (ctx);
6598 lower_omp_sections (gsi_p, ctx);
6599 break;
6600 case GIMPLE_OMP_SINGLE:
6601 ctx = maybe_lookup_ctx (stmt);
6602 gcc_assert (ctx);
6603 lower_omp_single (gsi_p, ctx);
6604 break;
6605 case GIMPLE_OMP_MASTER:
6606 ctx = maybe_lookup_ctx (stmt);
6607 gcc_assert (ctx);
6608 lower_omp_master (gsi_p, ctx);
6609 break;
6610 case GIMPLE_OMP_ORDERED:
6611 ctx = maybe_lookup_ctx (stmt);
6612 gcc_assert (ctx);
6613 lower_omp_ordered (gsi_p, ctx);
6614 break;
6615 case GIMPLE_OMP_CRITICAL:
6616 ctx = maybe_lookup_ctx (stmt);
6617 gcc_assert (ctx);
6618 lower_omp_critical (gsi_p, ctx);
6619 break;
6620 case GIMPLE_OMP_ATOMIC_LOAD:
6621 if ((ctx || task_shared_vars)
6622 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6623 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6624 gimple_regimplify_operands (stmt, gsi_p);
6625 break;
6626 default:
6627 if ((ctx || task_shared_vars)
6628 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6629 ctx ? NULL : &wi))
6630 gimple_regimplify_operands (stmt, gsi_p);
6631 break;
6632 }
6633 }
6634
6635 static void
6636 lower_omp (gimple_seq body, omp_context *ctx)
6637 {
6638 location_t saved_location = input_location;
6639 gimple_stmt_iterator gsi;
6640 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6641 lower_omp_1 (&gsi, ctx);
6642 input_location = saved_location;
6643 }
6644 \f
6645 /* Main entry point. */
6646
6647 static unsigned int
6648 execute_lower_omp (void)
6649 {
6650 gimple_seq body;
6651
6652 /* This pass always runs, to provide PROP_gimple_lomp.
6653 But there is nothing to do unless -fopenmp is given. */
6654 if (flag_openmp == 0)
6655 return 0;
6656
6657 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6658 delete_omp_context);
6659
6660 body = gimple_body (current_function_decl);
6661 scan_omp (body, NULL);
6662 gcc_assert (taskreg_nesting_level == 0);
6663
6664 if (all_contexts->root)
6665 {
6666 struct gimplify_ctx gctx;
6667
6668 if (task_shared_vars)
6669 push_gimplify_context (&gctx);
6670 lower_omp (body, NULL);
6671 if (task_shared_vars)
6672 pop_gimplify_context (NULL);
6673 }
6674
6675 if (all_contexts)
6676 {
6677 splay_tree_delete (all_contexts);
6678 all_contexts = NULL;
6679 }
6680 BITMAP_FREE (task_shared_vars);
6681 return 0;
6682 }
6683
6684 struct gimple_opt_pass pass_lower_omp =
6685 {
6686 {
6687 GIMPLE_PASS,
6688 "omplower", /* name */
6689 NULL, /* gate */
6690 execute_lower_omp, /* execute */
6691 NULL, /* sub */
6692 NULL, /* next */
6693 0, /* static_pass_number */
6694 TV_NONE, /* tv_id */
6695 PROP_gimple_any, /* properties_required */
6696 PROP_gimple_lomp, /* properties_provided */
6697 0, /* properties_destroyed */
6698 0, /* todo_flags_start */
6699 TODO_dump_func /* todo_flags_finish */
6700 }
6701 };
6702 \f
6703 /* The following is a utility to diagnose OpenMP structured block violations.
6704 It is not part of the "omplower" pass, as that's invoked too late. It
6705 should be invoked by the respective front ends after gimplification. */
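/* Editor's note -- an assumed example of what this utility rejects:

       goto l;                        <-- invalid entry
       #pragma omp parallel
       {
         l: ;
         goto m;                      <-- invalid branch out
       }
       m: ;

   Pass 1 below records the OMP construct enclosing each label and
   pass 2 compares it with the context of every branch, emitting the
   "invalid entry to OpenMP structured block" or "invalid branch
   to/from an OpenMP structured block" errors.  */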
6706
6707 static splay_tree all_labels;
6708
6709 /* Check for mismatched contexts and generate an error if needed. Return
6710 true if an error is detected. */
6711
6712 static bool
6713 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6714 gimple branch_ctx, gimple label_ctx)
6715 {
6716 if (label_ctx == branch_ctx)
6717 return false;
6718
6719
6720 /*
6721 Previously we kept track of the label's entire context in diagnose_sb_[12]
6722 so we could traverse it and issue a correct "exit" or "enter" error
6723 message upon a structured block violation.
6724
6725 We built the context by building a list with tree_cons'ing, but there is
6726 no easy counterpart in gimple tuples. It seems like far too much work
6727 for issuing exit/enter error messages. If someone really misses the
6728 distinct error message... patches welcome.
6729 */
6730
6731 #if 0
6732 /* Try to avoid confusing the user by producing an error message
6733 with correct "exit" or "enter" verbiage. We prefer "exit"
6734 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6735 if (branch_ctx == NULL)
6736 exit_p = false;
6737 else
6738 {
6739 while (label_ctx)
6740 {
6741 if (TREE_VALUE (label_ctx) == branch_ctx)
6742 {
6743 exit_p = false;
6744 break;
6745 }
6746 label_ctx = TREE_CHAIN (label_ctx);
6747 }
6748 }
6749
6750 if (exit_p)
6751 error ("invalid exit from OpenMP structured block");
6752 else
6753 error ("invalid entry to OpenMP structured block");
6754 #endif
6755
6756 /* If it's obvious we have an invalid entry, be specific about the error. */
6757 if (branch_ctx == NULL)
6758 error ("invalid entry to OpenMP structured block");
6759 else
6760 /* Otherwise, be vague and lazy, but efficient. */
6761 error ("invalid branch to/from an OpenMP structured block");
6762
6763 gsi_replace (gsi_p, gimple_build_nop (), false);
6764 return true;
6765 }
6766
6767 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6768 where each label is found. */
6769
6770 static tree
6771 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6772 struct walk_stmt_info *wi)
6773 {
6774 gimple context = (gimple) wi->info;
6775 gimple inner_context;
6776 gimple stmt = gsi_stmt (*gsi_p);
6777
6778 *handled_ops_p = true;
6779
6780 switch (gimple_code (stmt))
6781 {
6782 WALK_SUBSTMTS;
6783
6784 case GIMPLE_OMP_PARALLEL:
6785 case GIMPLE_OMP_TASK:
6786 case GIMPLE_OMP_SECTIONS:
6787 case GIMPLE_OMP_SINGLE:
6788 case GIMPLE_OMP_SECTION:
6789 case GIMPLE_OMP_MASTER:
6790 case GIMPLE_OMP_ORDERED:
6791 case GIMPLE_OMP_CRITICAL:
6792 /* The minimal context here is just the current OMP construct. */
6793 inner_context = stmt;
6794 wi->info = inner_context;
6795 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6796 wi->info = context;
6797 break;
6798
6799 case GIMPLE_OMP_FOR:
6800 inner_context = stmt;
6801 wi->info = inner_context;
6802 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6803 walk them. */
6804 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6805 diagnose_sb_1, NULL, wi);
6806 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6807 wi->info = context;
6808 break;
6809
6810 case GIMPLE_LABEL:
6811 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6812 (splay_tree_value) context);
6813 break;
6814
6815 default:
6816 break;
6817 }
6818
6819 return NULL_TREE;
6820 }
6821
6822 /* Pass 2: Check each branch and see if its context differs from that
6823 of the destination label. */
6824
6825 static tree
6826 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6827 struct walk_stmt_info *wi)
6828 {
6829 gimple context = (gimple) wi->info;
6830 splay_tree_node n;
6831 gimple stmt = gsi_stmt (*gsi_p);
6832
6833 *handled_ops_p = true;
6834
6835 switch (gimple_code (stmt))
6836 {
6837 WALK_SUBSTMTS;
6838
6839 case GIMPLE_OMP_PARALLEL:
6840 case GIMPLE_OMP_TASK:
6841 case GIMPLE_OMP_SECTIONS:
6842 case GIMPLE_OMP_SINGLE:
6843 case GIMPLE_OMP_SECTION:
6844 case GIMPLE_OMP_MASTER:
6845 case GIMPLE_OMP_ORDERED:
6846 case GIMPLE_OMP_CRITICAL:
6847 wi->info = stmt;
6848 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6849 wi->info = context;
6850 break;
6851
6852 case GIMPLE_OMP_FOR:
6853 wi->info = stmt;
6854 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6855 walk them. */
6856 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6857 diagnose_sb_2, NULL, wi);
6858 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6859 wi->info = context;
6860 break;
6861
6862 case GIMPLE_COND:
6863 {
6864 tree lab = gimple_cond_true_label (stmt);
6865 if (lab)
6866 {
6867 n = splay_tree_lookup (all_labels,
6868 (splay_tree_key) lab);
6869 diagnose_sb_0 (gsi_p, context,
6870 n ? (gimple) n->value : NULL);
6871 }
6872 lab = gimple_cond_false_label (stmt);
6873 if (lab)
6874 {
6875 n = splay_tree_lookup (all_labels,
6876 (splay_tree_key) lab);
6877 diagnose_sb_0 (gsi_p, context,
6878 n ? (gimple) n->value : NULL);
6879 }
6880 }
6881 break;
6882
6883 case GIMPLE_GOTO:
6884 {
6885 tree lab = gimple_goto_dest (stmt);
6886 if (TREE_CODE (lab) != LABEL_DECL)
6887 break;
6888
6889 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6890 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
6891 }
6892 break;
6893
6894 case GIMPLE_SWITCH:
6895 {
6896 unsigned int i;
6897 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
6898 {
6899 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
6900 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6901 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
6902 break;
6903 }
6904 }
6905 break;
6906
6907 case GIMPLE_RETURN:
6908 diagnose_sb_0 (gsi_p, context, NULL);
6909 break;
6910
6911 default:
6912 break;
6913 }
6914
6915 return NULL_TREE;
6916 }
6917
6918 static unsigned int
6919 diagnose_omp_structured_block_errors (void)
6920 {
6921 struct walk_stmt_info wi;
6922 gimple_seq body = gimple_body (current_function_decl);
6923
6924 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6925
6926 memset (&wi, 0, sizeof (wi));
6927 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
6928
6929 memset (&wi, 0, sizeof (wi));
6930 wi.want_locations = true;
6931 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
6932
6933 splay_tree_delete (all_labels);
6934 all_labels = NULL;
6935
6936 return 0;
6937 }
6938
6939 static bool
6940 gate_diagnose_omp_blocks (void)
6941 {
6942 return flag_openmp != 0;
6943 }
6944
6945 struct gimple_opt_pass pass_diagnose_omp_blocks =
6946 {
6947 {
6948 GIMPLE_PASS,
6949 "*diagnose_omp_blocks", /* name */
6950 gate_diagnose_omp_blocks, /* gate */
6951 diagnose_omp_structured_block_errors, /* execute */
6952 NULL, /* sub */
6953 NULL, /* next */
6954 0, /* static_pass_number */
6955 TV_NONE, /* tv_id */
6956 PROP_gimple_any, /* properties_required */
6957 0, /* properties_provided */
6958 0, /* properties_destroyed */
6959 0, /* todo_flags_start */
6960 0, /* todo_flags_finish */
6961 }
6962 };
6963
6964 #include "gt-omp-low.h"