[gcc.git] / gcc / omp-low.c
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
7 Free Software Foundation, Inc.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "gimple.h"
32 #include "tree-iterator.h"
33 #include "tree-inline.h"
34 #include "langhooks.h"
35 #include "diagnostic-core.h"
36 #include "tree-flow.h"
37 #include "timevar.h"
38 #include "flags.h"
39 #include "function.h"
40 #include "expr.h"
41 #include "tree-pass.h"
42 #include "ggc.h"
43 #include "except.h"
44 #include "splay-tree.h"
45 #include "optabs.h"
46 #include "cfgloop.h"
47
48
49 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
50 phases. The first phase scans the function looking for OMP statements
51 and then for variables that must be replaced to satisfy data sharing
52 clauses. The second phase expands code for the constructs, as well as
53 re-gimplifying things when variables have been replaced with complex
54 expressions.
55
56 Final code generation is done by pass_expand_omp. The flowgraph is
57 scanned for parallel regions which are then moved to a new
58 function, to be invoked by the thread library. */
59
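/* As a purely illustrative sketch (the identifiers below are invented and
   are not necessarily what the compiler emits), a directive such as

	#pragma omp parallel shared (a)
	  a++;

   is conceptually outlined into a child function plus libgomp calls:

	struct .omp_data_s { int a; };

	static void foo._omp_fn.0 (struct .omp_data_s *.omp_data_i)
	{
	  .omp_data_i->a = .omp_data_i->a + 1;
	}

	.omp_data_o.a = a;
	__builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
	foo._omp_fn.0 (&.omp_data_o);
	__builtin_GOMP_parallel_end ();
	a = .omp_data_o.a;

   The scanning phase below builds the .omp_data_s record and the variable
   remappings; pass_expand_omp performs the actual outlining.  */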
60 /* Context structure. Used to store information about each parallel
61 directive in the code. */
62
63 typedef struct omp_context
64 {
65 /* This field must be at the beginning, as we do "inheritance": Some
66 callback functions for tree-inline.c (e.g., omp_copy_decl)
67 receive a copy_body_data pointer that is up-casted to an
68 omp_context pointer. */
69 copy_body_data cb;
70
71 /* The tree of contexts corresponding to the encountered constructs. */
72 struct omp_context *outer;
73 gimple stmt;
74
75 /* Map variables to fields in a structure that allows communication
76 between sending and receiving threads. */
77 splay_tree field_map;
78 tree record_type;
79 tree sender_decl;
80 tree receiver_decl;
81
82 /* These are used just by task contexts, if task firstprivate fn is
83 needed. srecord_type is used to communicate from the thread
84 that encountered the task construct to the task firstprivate fn,
85 while record_type is allocated by GOMP_task, initialized by the task firstprivate
86 fn and passed to the task body fn. */
87 splay_tree sfield_map;
88 tree srecord_type;
89
90 /* A chain of variables to add to the top-level block surrounding the
91 construct. In the case of a parallel, this is in the child function. */
92 tree block_vars;
93
94 /* What to do with variables with implicitly determined sharing
95 attributes. */
96 enum omp_clause_default_kind default_kind;
97
98 /* Nesting depth of this context. Used to beautify error messages re
99 invalid gotos. The outermost ctx is depth 1, with depth 0 being
100 reserved for the main body of the function. */
101 int depth;
102
103 /* True if this parallel directive is nested within another. */
104 bool is_nested;
105 } omp_context;
106
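/* As a hedged illustration of the fields above: given

	#pragma omp parallel shared (a) firstprivate (b)

   FIELD_MAP maps both A and B to FIELD_DECLs of RECORD_TYPE (roughly
   struct .omp_data_s { int a; int b; }), SENDER_DECL is the .omp_data_o
   instance filled in by the encountering thread, and RECEIVER_DECL is the
   .omp_data_i pointer parameter through which the child function reads
   the fields.  */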
107
108 struct omp_for_data_loop
109 {
110 tree v, n1, n2, step;
111 enum tree_code cond_code;
112 };
113
114 /* A structure describing the main elements of a parallel loop. */
115
116 struct omp_for_data
117 {
118 struct omp_for_data_loop loop;
119 tree chunk_size;
120 gimple for_stmt;
121 tree pre, iter_type;
122 int collapse;
123 bool have_nowait, have_ordered;
124 enum omp_clause_schedule_kind sched_kind;
125 struct omp_for_data_loop *loops;
126 };
127
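/* Illustrative example (not literal dump output): for

	#pragma omp for schedule (dynamic, 4)
	for (i = 0; i < n; i++)

   extract_omp_for_data records roughly loop.v = i, loop.n1 = 0,
   loop.n2 = n, loop.step = 1, loop.cond_code = LT_EXPR, collapse = 1,
   sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC and chunk_size = 4.  */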
128
129 static splay_tree all_contexts;
130 static int taskreg_nesting_level;
131 struct omp_region *root_omp_region;
132 static bitmap task_shared_vars;
133
134 static void scan_omp (gimple_seq, omp_context *);
135 static tree scan_omp_1_op (tree *, int *, void *);
136
137 #define WALK_SUBSTMTS \
138 case GIMPLE_BIND: \
139 case GIMPLE_TRY: \
140 case GIMPLE_CATCH: \
141 case GIMPLE_EH_FILTER: \
142 case GIMPLE_TRANSACTION: \
143 /* The sub-statements for these should be walked. */ \
144 *handled_ops_p = false; \
145 break;
146
147 /* Convenience function for calling scan_omp_1_op on tree operands. */
148
149 static inline tree
150 scan_omp_op (tree *tp, omp_context *ctx)
151 {
152 struct walk_stmt_info wi;
153
154 memset (&wi, 0, sizeof (wi));
155 wi.info = ctx;
156 wi.want_locations = true;
157
158 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
159 }
160
161 static void lower_omp (gimple_seq, omp_context *);
162 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
163 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
164
165 /* Find an OpenMP clause of type KIND within CLAUSES. */
166
167 tree
168 find_omp_clause (tree clauses, enum omp_clause_code kind)
169 {
170 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
171 if (OMP_CLAUSE_CODE (clauses) == kind)
172 return clauses;
173
174 return NULL_TREE;
175 }
176
177 /* Return true if CTX is for an omp parallel. */
178
179 static inline bool
180 is_parallel_ctx (omp_context *ctx)
181 {
182 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
183 }
184
185
186 /* Return true if CTX is for an omp task. */
187
188 static inline bool
189 is_task_ctx (omp_context *ctx)
190 {
191 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
192 }
193
194
195 /* Return true if CTX is for an omp parallel or omp task. */
196
197 static inline bool
198 is_taskreg_ctx (omp_context *ctx)
199 {
200 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
201 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
202 }
203
204
205 /* Return true if REGION is a combined parallel+workshare region. */
206
207 static inline bool
208 is_combined_parallel (struct omp_region *region)
209 {
210 return region->is_combined_parallel;
211 }
212
213
214 /* Extract the header elements of parallel loop FOR_STMT and store
215 them into *FD. */
216
217 static void
218 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
219 struct omp_for_data_loop *loops)
220 {
221 tree t, var, *collapse_iter, *collapse_count;
222 tree count = NULL_TREE, iter_type = long_integer_type_node;
223 struct omp_for_data_loop *loop;
224 int i;
225 struct omp_for_data_loop dummy_loop;
226 location_t loc = gimple_location (for_stmt);
227
228 fd->for_stmt = for_stmt;
229 fd->pre = NULL;
230 fd->collapse = gimple_omp_for_collapse (for_stmt);
231 if (fd->collapse > 1)
232 fd->loops = loops;
233 else
234 fd->loops = &fd->loop;
235
236 fd->have_nowait = fd->have_ordered = false;
237 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
238 fd->chunk_size = NULL_TREE;
239 collapse_iter = NULL;
240 collapse_count = NULL;
241
242 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
243 switch (OMP_CLAUSE_CODE (t))
244 {
245 case OMP_CLAUSE_NOWAIT:
246 fd->have_nowait = true;
247 break;
248 case OMP_CLAUSE_ORDERED:
249 fd->have_ordered = true;
250 break;
251 case OMP_CLAUSE_SCHEDULE:
252 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
253 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
254 break;
255 case OMP_CLAUSE_COLLAPSE:
256 if (fd->collapse > 1)
257 {
258 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
259 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
260 }
261 default:
262 break;
263 }
264
265 /* FIXME: for now map schedule(auto) to schedule(static).
266 There should be analysis to determine whether all iterations
267 are approximately the same amount of work (then schedule(static)
268 is best) or if it varies (then schedule(dynamic,N) is better). */
269 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
270 {
271 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
272 gcc_assert (fd->chunk_size == NULL);
273 }
274 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
275 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
276 gcc_assert (fd->chunk_size == NULL);
277 else if (fd->chunk_size == NULL)
278 {
279 /* We only need to compute a default chunk size for ordered
280 static loops and dynamic loops. */
281 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
282 || fd->have_ordered
283 || fd->collapse > 1)
284 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
285 ? integer_zero_node : integer_one_node;
286 }
287
288 for (i = 0; i < fd->collapse; i++)
289 {
290 if (fd->collapse == 1)
291 loop = &fd->loop;
292 else if (loops != NULL)
293 loop = loops + i;
294 else
295 loop = &dummy_loop;
296
297
298 loop->v = gimple_omp_for_index (for_stmt, i);
299 gcc_assert (SSA_VAR_P (loop->v));
300 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
301 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
302 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
303 loop->n1 = gimple_omp_for_initial (for_stmt, i);
304
305 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
306 loop->n2 = gimple_omp_for_final (for_stmt, i);
307 switch (loop->cond_code)
308 {
309 case LT_EXPR:
310 case GT_EXPR:
311 break;
312 case LE_EXPR:
313 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
314 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
315 else
316 loop->n2 = fold_build2_loc (loc,
317 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
318 build_int_cst (TREE_TYPE (loop->n2), 1));
319 loop->cond_code = LT_EXPR;
320 break;
321 case GE_EXPR:
322 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
323 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
324 else
325 loop->n2 = fold_build2_loc (loc,
326 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
327 build_int_cst (TREE_TYPE (loop->n2), 1));
328 loop->cond_code = GT_EXPR;
329 break;
330 default:
331 gcc_unreachable ();
332 }
333
334 t = gimple_omp_for_incr (for_stmt, i);
335 gcc_assert (TREE_OPERAND (t, 0) == var);
336 switch (TREE_CODE (t))
337 {
338 case PLUS_EXPR:
339 loop->step = TREE_OPERAND (t, 1);
340 break;
341 case POINTER_PLUS_EXPR:
342 loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
343 break;
344 case MINUS_EXPR:
345 loop->step = TREE_OPERAND (t, 1);
346 loop->step = fold_build1_loc (loc,
347 NEGATE_EXPR, TREE_TYPE (loop->step),
348 loop->step);
349 break;
350 default:
351 gcc_unreachable ();
352 }
353
354 if (iter_type != long_long_unsigned_type_node)
355 {
356 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
357 iter_type = long_long_unsigned_type_node;
358 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
359 && TYPE_PRECISION (TREE_TYPE (loop->v))
360 >= TYPE_PRECISION (iter_type))
361 {
362 tree n;
363
364 if (loop->cond_code == LT_EXPR)
365 n = fold_build2_loc (loc,
366 PLUS_EXPR, TREE_TYPE (loop->v),
367 loop->n2, loop->step);
368 else
369 n = loop->n1;
370 if (TREE_CODE (n) != INTEGER_CST
371 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
372 iter_type = long_long_unsigned_type_node;
373 }
374 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
375 > TYPE_PRECISION (iter_type))
376 {
377 tree n1, n2;
378
379 if (loop->cond_code == LT_EXPR)
380 {
381 n1 = loop->n1;
382 n2 = fold_build2_loc (loc,
383 PLUS_EXPR, TREE_TYPE (loop->v),
384 loop->n2, loop->step);
385 }
386 else
387 {
388 n1 = fold_build2_loc (loc,
389 MINUS_EXPR, TREE_TYPE (loop->v),
390 loop->n2, loop->step);
391 n2 = loop->n1;
392 }
393 if (TREE_CODE (n1) != INTEGER_CST
394 || TREE_CODE (n2) != INTEGER_CST
395 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
396 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
397 iter_type = long_long_unsigned_type_node;
398 }
399 }
400
401 if (collapse_count && *collapse_count == NULL)
402 {
403 if ((i == 0 || count != NULL_TREE)
404 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
405 && TREE_CONSTANT (loop->n1)
406 && TREE_CONSTANT (loop->n2)
407 && TREE_CODE (loop->step) == INTEGER_CST)
408 {
409 tree itype = TREE_TYPE (loop->v);
410
411 if (POINTER_TYPE_P (itype))
412 itype = signed_type_for (itype);
413 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
414 t = fold_build2_loc (loc,
415 PLUS_EXPR, itype,
416 fold_convert_loc (loc, itype, loop->step), t);
417 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
418 fold_convert_loc (loc, itype, loop->n2));
419 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
420 fold_convert_loc (loc, itype, loop->n1));
421 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
422 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
423 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
424 fold_build1_loc (loc, NEGATE_EXPR, itype,
425 fold_convert_loc (loc, itype,
426 loop->step)));
427 else
428 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
429 fold_convert_loc (loc, itype, loop->step));
430 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
431 if (count != NULL_TREE)
432 count = fold_build2_loc (loc,
433 MULT_EXPR, long_long_unsigned_type_node,
434 count, t);
435 else
436 count = t;
437 if (TREE_CODE (count) != INTEGER_CST)
438 count = NULL_TREE;
439 }
440 else
441 count = NULL_TREE;
442 }
443 }
444
445 if (count)
446 {
447 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
448 iter_type = long_long_unsigned_type_node;
449 else
450 iter_type = long_integer_type_node;
451 }
452 else if (collapse_iter && *collapse_iter != NULL)
453 iter_type = TREE_TYPE (*collapse_iter);
454 fd->iter_type = iter_type;
455 if (collapse_iter && *collapse_iter == NULL)
456 *collapse_iter = create_tmp_var (iter_type, ".iter");
457 if (collapse_count && *collapse_count == NULL)
458 {
459 if (count)
460 *collapse_count = fold_convert_loc (loc, iter_type, count);
461 else
462 *collapse_count = create_tmp_var (iter_type, ".count");
463 }
464
465 if (fd->collapse > 1)
466 {
467 fd->loop.v = *collapse_iter;
468 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
469 fd->loop.n2 = *collapse_count;
470 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
471 fd->loop.cond_code = LT_EXPR;
472 }
473 }
474
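/* A small worked example of the normalization above (assuming plain
   signed int iterators):

	#pragma omp for collapse (2)
	for (i = 0; i <= 9; i++)
	  for (j = 10; j > 0; j--)

   The first loop is rewritten to i < 10 (n2 becomes 9 + 1), the second
   keeps GT_EXPR with step -1.  With constant bounds each loop's trip
   count is (step + adjust + n2 - n1) / step, where adjust is -1 for
   LT_EXPR and 1 for GT_EXPR: (1 - 1 + 10 - 0) / 1 = 10 and
   (-1 + 1 + 0 - 10) / -1 = 10, so the collapsed count folds to 100.  */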
475
476 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
477 is the immediate dominator of PAR_ENTRY_BB, return true if there
478 are no data dependencies that would prevent expanding the parallel
479 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
480
481 When expanding a combined parallel+workshare region, the call to
482 the child function may need additional arguments in the case of
483 GIMPLE_OMP_FOR regions. In some cases, these arguments are
484 computed out of variables passed in from the parent to the child
485 via 'struct .omp_data_s'. For instance:
486
487 #pragma omp parallel for schedule (guided, i * 4)
488 for (j ...)
489
490 Is lowered into:
491
492 # BLOCK 2 (PAR_ENTRY_BB)
493 .omp_data_o.i = i;
494 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
495
496 # BLOCK 3 (WS_ENTRY_BB)
497 .omp_data_i = &.omp_data_o;
498 D.1667 = .omp_data_i->i;
499 D.1598 = D.1667 * 4;
500 #pragma omp for schedule (guided, D.1598)
501
502 When we outline the parallel region, the call to the child function
503 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
504 that value is computed *after* the call site. So, in principle we
505 cannot do the transformation.
506
507 To see whether the code in WS_ENTRY_BB blocks the combined
508 parallel+workshare call, we collect all the variables used in the
509 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
510 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
511 call.
512
513 FIXME. If we had the SSA form built at this point, we could merely
514 hoist the code in block 3 into block 2 and be done with it. But at
515 this point we don't have dataflow information and though we could
516 hack something up here, it is really not worth the aggravation. */
517
518 static bool
519 workshare_safe_to_combine_p (basic_block ws_entry_bb)
520 {
521 struct omp_for_data fd;
522 gimple ws_stmt = last_stmt (ws_entry_bb);
523
524 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
525 return true;
526
527 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
528
529 extract_omp_for_data (ws_stmt, &fd, NULL);
530
531 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
532 return false;
533 if (fd.iter_type != long_integer_type_node)
534 return false;
535
536 /* FIXME. We give up too easily here. If any of these arguments
537 are not constants, they will likely involve variables that have
538 been mapped into fields of .omp_data_s for sharing with the child
539 function. With appropriate data flow, it would be possible to
540 see through this. */
541 if (!is_gimple_min_invariant (fd.loop.n1)
542 || !is_gimple_min_invariant (fd.loop.n2)
543 || !is_gimple_min_invariant (fd.loop.step)
544 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
545 return false;
546
547 return true;
548 }
549
550
551 /* Collect additional arguments needed to emit a combined
552 parallel+workshare call. WS_STMT is the workshare directive being
553 expanded. */
554
555 static VEC(tree,gc) *
556 get_ws_args_for (gimple ws_stmt)
557 {
558 tree t;
559 location_t loc = gimple_location (ws_stmt);
560 VEC(tree,gc) *ws_args;
561
562 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
563 {
564 struct omp_for_data fd;
565
566 extract_omp_for_data (ws_stmt, &fd, NULL);
567
568 ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));
569
570 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
571 VEC_quick_push (tree, ws_args, t);
572
573 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
574 VEC_quick_push (tree, ws_args, t);
575
576 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
577 VEC_quick_push (tree, ws_args, t);
578
579 if (fd.chunk_size)
580 {
581 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
582 VEC_quick_push (tree, ws_args, t);
583 }
584
585 return ws_args;
586 }
587 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
588 {
589 /* Number of sections is equal to the number of edges from the
590 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
591 the exit of the sections region. */
592 basic_block bb = single_succ (gimple_bb (ws_stmt));
593 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
594 ws_args = VEC_alloc (tree, gc, 1);
595 VEC_quick_push (tree, ws_args, t);
596 return ws_args;
597 }
598
599 gcc_unreachable ();
600 }
601
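/* As an illustration (the exact library entry point is chosen later, in
   pass_expand_omp), for a combined

	#pragma omp parallel for schedule (dynamic, 4)
	for (i = 0; i < 100; i++)

   the vector built above holds { 0, 100, 1, 4 }, which are appended to
   the GOMP_parallel_loop_dynamic_start arguments after the child
   function, data pointer and thread count.  For parallel sections the
   vector holds a single element, the number of sections.  */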
602
603 /* Discover whether REGION is a combined parallel+workshare region. */
604
605 static void
606 determine_parallel_type (struct omp_region *region)
607 {
608 basic_block par_entry_bb, par_exit_bb;
609 basic_block ws_entry_bb, ws_exit_bb;
610
611 if (region == NULL || region->inner == NULL
612 || region->exit == NULL || region->inner->exit == NULL
613 || region->inner->cont == NULL)
614 return;
615
616 /* We only support parallel+for and parallel+sections. */
617 if (region->type != GIMPLE_OMP_PARALLEL
618 || (region->inner->type != GIMPLE_OMP_FOR
619 && region->inner->type != GIMPLE_OMP_SECTIONS))
620 return;
621
622 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
623 WS_EXIT_BB -> PAR_EXIT_BB. */
624 par_entry_bb = region->entry;
625 par_exit_bb = region->exit;
626 ws_entry_bb = region->inner->entry;
627 ws_exit_bb = region->inner->exit;
628
629 if (single_succ (par_entry_bb) == ws_entry_bb
630 && single_succ (ws_exit_bb) == par_exit_bb
631 && workshare_safe_to_combine_p (ws_entry_bb)
632 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
633 || (last_and_only_stmt (ws_entry_bb)
634 && last_and_only_stmt (par_exit_bb))))
635 {
636 gimple ws_stmt = last_stmt (ws_entry_bb);
637
638 if (region->inner->type == GIMPLE_OMP_FOR)
639 {
640 /* If this is a combined parallel loop, we need to determine
641 whether or not to use the combined library calls. There
642 are two cases where we do not apply the transformation:
643 static loops and any kind of ordered loop. In the first
644 case, we already open code the loop so there is no need
645 to do anything else. In the latter case, the combined
646 parallel loop call would still need extra synchronization
647 to implement ordered semantics, so there would not be any
648 gain in using the combined call. */
649 tree clauses = gimple_omp_for_clauses (ws_stmt);
650 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
651 if (c == NULL
652 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
653 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
654 {
655 region->is_combined_parallel = false;
656 region->inner->is_combined_parallel = false;
657 return;
658 }
659 }
660
661 region->is_combined_parallel = true;
662 region->inner->is_combined_parallel = true;
663 region->ws_args = get_ws_args_for (ws_stmt);
664 }
665 }
666
667
668 /* Return true if EXPR is variable sized. */
669
670 static inline bool
671 is_variable_sized (const_tree expr)
672 {
673 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
674 }
675
676 /* Return true if DECL is a reference type. */
677
678 static inline bool
679 is_reference (tree decl)
680 {
681 return lang_hooks.decls.omp_privatize_by_reference (decl);
682 }
683
684 /* Look up variables in the decl or field splay trees. The "maybe" form
685 allows for the variable not to have been entered; otherwise we
686 assert that the variable must have been entered. */
687
688 static inline tree
689 lookup_decl (tree var, omp_context *ctx)
690 {
691 tree *n;
692 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
693 return *n;
694 }
695
696 static inline tree
697 maybe_lookup_decl (const_tree var, omp_context *ctx)
698 {
699 tree *n;
700 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
701 return n ? *n : NULL_TREE;
702 }
703
704 static inline tree
705 lookup_field (tree var, omp_context *ctx)
706 {
707 splay_tree_node n;
708 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
709 return (tree) n->value;
710 }
711
712 static inline tree
713 lookup_sfield (tree var, omp_context *ctx)
714 {
715 splay_tree_node n;
716 n = splay_tree_lookup (ctx->sfield_map
717 ? ctx->sfield_map : ctx->field_map,
718 (splay_tree_key) var);
719 return (tree) n->value;
720 }
721
722 static inline tree
723 maybe_lookup_field (tree var, omp_context *ctx)
724 {
725 splay_tree_node n;
726 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
727 return n ? (tree) n->value : NULL_TREE;
728 }
729
730 /* Return true if DECL should be copied by pointer. SHARED_CTX is
731 the parallel context if DECL is to be shared. */
732
733 static bool
734 use_pointer_for_field (tree decl, omp_context *shared_ctx)
735 {
736 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
737 return true;
738
739 /* We can only use copy-in/copy-out semantics for shared variables
740 when we know the value is not accessible from an outer scope. */
741 if (shared_ctx)
742 {
743 /* ??? Trivially accessible from anywhere. But why would we even
744 be passing an address in this case? Should we simply assert
745 this to be false, or should we have a cleanup pass that removes
746 these from the list of mappings? */
747 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
748 return true;
749
750 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
751 without analyzing the expression whether or not its location
752 is accessible to anyone else. In the case of nested parallel
753 regions it certainly may be. */
754 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
755 return true;
756
757 /* Do not use copy-in/copy-out for variables that have their
758 address taken. */
759 if (TREE_ADDRESSABLE (decl))
760 return true;
761
762 /* Disallow copy-in/out in nested parallel if
763 decl is shared in outer parallel, otherwise
764 each thread could store the shared variable
765 in its own copy-in location, making the
766 variable no longer really shared. */
767 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
768 {
769 omp_context *up;
770
771 for (up = shared_ctx->outer; up; up = up->outer)
772 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
773 break;
774
775 if (up)
776 {
777 tree c;
778
779 for (c = gimple_omp_taskreg_clauses (up->stmt);
780 c; c = OMP_CLAUSE_CHAIN (c))
781 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
782 && OMP_CLAUSE_DECL (c) == decl)
783 break;
784
785 if (c)
786 goto maybe_mark_addressable_and_ret;
787 }
788 }
789
790 /* For tasks avoid using copy-in/out, unless they are readonly
791 (in which case just copy-in is used). As tasks can be
792 deferred or executed in different thread, when GOMP_task
793 returns, the task hasn't necessarily terminated. */
794 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
795 {
796 tree outer;
797 maybe_mark_addressable_and_ret:
798 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
799 if (is_gimple_reg (outer))
800 {
801 /* Taking address of OUTER in lower_send_shared_vars
802 might need regimplification of everything that uses the
803 variable. */
804 if (!task_shared_vars)
805 task_shared_vars = BITMAP_ALLOC (NULL);
806 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
807 TREE_ADDRESSABLE (outer) = 1;
808 }
809 return true;
810 }
811 }
812
813 return false;
814 }
815
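/* Two hedged examples of the rules above: a local "int x" that is merely
   shared by a non-nested parallel is communicated by value (copy-in and
   copy-out through the field), whereas an aggregate, a variable whose
   address is taken, or a variable also shared by an enclosing parallel
   is communicated through a pointer field so that every thread observes
   the one object.  */
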
816 /* Create a new VAR_DECL and copy information from VAR to it. */
817
818 tree
819 copy_var_decl (tree var, tree name, tree type)
820 {
821 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
822
823 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
824 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
825 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
826 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
827 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
828 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
829 TREE_USED (copy) = 1;
830 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
831
832 return copy;
833 }
834
835 /* Construct a new automatic decl similar to VAR. */
836
837 static tree
838 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
839 {
840 tree copy = copy_var_decl (var, name, type);
841
842 DECL_CONTEXT (copy) = current_function_decl;
843 DECL_CHAIN (copy) = ctx->block_vars;
844 ctx->block_vars = copy;
845
846 return copy;
847 }
848
849 static tree
850 omp_copy_decl_1 (tree var, omp_context *ctx)
851 {
852 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
853 }
854
855 /* Build tree nodes to access the field for VAR on the receiver side. */
856
857 static tree
858 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
859 {
860 tree x, field = lookup_field (var, ctx);
861
862 /* If the receiver record type was remapped in the child function,
863 remap the field into the new record type. */
864 x = maybe_lookup_field (field, ctx);
865 if (x != NULL)
866 field = x;
867
868 x = build_simple_mem_ref (ctx->receiver_decl);
869 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
870 if (by_ref)
871 x = build_simple_mem_ref (x);
872
873 return x;
874 }
875
876 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
877 of a parallel, this is a component reference; for workshare constructs
878 this is some variable. */
879
880 static tree
881 build_outer_var_ref (tree var, omp_context *ctx)
882 {
883 tree x;
884
885 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
886 x = var;
887 else if (is_variable_sized (var))
888 {
889 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
890 x = build_outer_var_ref (x, ctx);
891 x = build_simple_mem_ref (x);
892 }
893 else if (is_taskreg_ctx (ctx))
894 {
895 bool by_ref = use_pointer_for_field (var, NULL);
896 x = build_receiver_ref (var, by_ref, ctx);
897 }
898 else if (ctx->outer)
899 x = lookup_decl (var, ctx->outer);
900 else if (is_reference (var))
901 /* This can happen with orphaned constructs. If VAR is a reference, it
902 may be shared and as such valid. */
903 x = var;
904 else
905 gcc_unreachable ();
906
907 if (is_reference (var))
908 x = build_simple_mem_ref (x);
909
910 return x;
911 }
912
913 /* Build tree nodes to access the field for VAR on the sender side. */
914
915 static tree
916 build_sender_ref (tree var, omp_context *ctx)
917 {
918 tree field = lookup_sfield (var, ctx);
919 return build3 (COMPONENT_REF, TREE_TYPE (field),
920 ctx->sender_decl, field, NULL);
921 }
922
923 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
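/* The MASK argument encodes which record receives the field: bit 0
   requests a field in RECORD_TYPE (recorded in FIELD_MAP) and bit 1 a
   field in SRECORD_TYPE (recorded in SFIELD_MAP); see the (mask & 1) and
   (mask & 2) tests below.  Only task contexts ever have a separate
   SRECORD_TYPE, as described for srecord_type in struct omp_context.  */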
924
925 static void
926 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
927 {
928 tree field, type, sfield = NULL_TREE;
929
930 gcc_assert ((mask & 1) == 0
931 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
932 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
933 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
934
935 type = TREE_TYPE (var);
936 if (by_ref)
937 type = build_pointer_type (type);
938 else if ((mask & 3) == 1 && is_reference (var))
939 type = TREE_TYPE (type);
940
941 field = build_decl (DECL_SOURCE_LOCATION (var),
942 FIELD_DECL, DECL_NAME (var), type);
943
944 /* Remember what variable this field was created for. This does have a
945 side effect of making dwarf2out ignore this member, so for helpful
946 debugging we clear it later in delete_omp_context. */
947 DECL_ABSTRACT_ORIGIN (field) = var;
948 if (type == TREE_TYPE (var))
949 {
950 DECL_ALIGN (field) = DECL_ALIGN (var);
951 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
952 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
953 }
954 else
955 DECL_ALIGN (field) = TYPE_ALIGN (type);
956
957 if ((mask & 3) == 3)
958 {
959 insert_field_into_struct (ctx->record_type, field);
960 if (ctx->srecord_type)
961 {
962 sfield = build_decl (DECL_SOURCE_LOCATION (var),
963 FIELD_DECL, DECL_NAME (var), type);
964 DECL_ABSTRACT_ORIGIN (sfield) = var;
965 DECL_ALIGN (sfield) = DECL_ALIGN (field);
966 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
967 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
968 insert_field_into_struct (ctx->srecord_type, sfield);
969 }
970 }
971 else
972 {
973 if (ctx->srecord_type == NULL_TREE)
974 {
975 tree t;
976
977 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
978 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
979 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
980 {
981 sfield = build_decl (DECL_SOURCE_LOCATION (var),
982 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
983 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
984 insert_field_into_struct (ctx->srecord_type, sfield);
985 splay_tree_insert (ctx->sfield_map,
986 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
987 (splay_tree_value) sfield);
988 }
989 }
990 sfield = field;
991 insert_field_into_struct ((mask & 1) ? ctx->record_type
992 : ctx->srecord_type, field);
993 }
994
995 if (mask & 1)
996 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
997 (splay_tree_value) field);
998 if ((mask & 2) && ctx->sfield_map)
999 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1000 (splay_tree_value) sfield);
1001 }
1002
1003 static tree
1004 install_var_local (tree var, omp_context *ctx)
1005 {
1006 tree new_var = omp_copy_decl_1 (var, ctx);
1007 insert_decl_map (&ctx->cb, var, new_var);
1008 return new_var;
1009 }
1010
1011 /* Adjust the replacement for DECL in CTX for the new context. This means
1012 copying the DECL_VALUE_EXPR, and fixing up the type. */
1013
1014 static void
1015 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1016 {
1017 tree new_decl, size;
1018
1019 new_decl = lookup_decl (decl, ctx);
1020
1021 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1022
1023 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1024 && DECL_HAS_VALUE_EXPR_P (decl))
1025 {
1026 tree ve = DECL_VALUE_EXPR (decl);
1027 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1028 SET_DECL_VALUE_EXPR (new_decl, ve);
1029 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1030 }
1031
1032 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1033 {
1034 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1035 if (size == error_mark_node)
1036 size = TYPE_SIZE (TREE_TYPE (new_decl));
1037 DECL_SIZE (new_decl) = size;
1038
1039 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1040 if (size == error_mark_node)
1041 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1042 DECL_SIZE_UNIT (new_decl) = size;
1043 }
1044 }
1045
1046 /* The callback for remap_decl. Search all containing contexts for a
1047 mapping of the variable; this avoids having to duplicate the splay
1048 tree ahead of time. We know a mapping doesn't already exist in the
1049 given context. Create new mappings to implement default semantics. */
1050
1051 static tree
1052 omp_copy_decl (tree var, copy_body_data *cb)
1053 {
1054 omp_context *ctx = (omp_context *) cb;
1055 tree new_var;
1056
1057 if (TREE_CODE (var) == LABEL_DECL)
1058 {
1059 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1060 DECL_CONTEXT (new_var) = current_function_decl;
1061 insert_decl_map (&ctx->cb, var, new_var);
1062 return new_var;
1063 }
1064
1065 while (!is_taskreg_ctx (ctx))
1066 {
1067 ctx = ctx->outer;
1068 if (ctx == NULL)
1069 return var;
1070 new_var = maybe_lookup_decl (var, ctx);
1071 if (new_var)
1072 return new_var;
1073 }
1074
1075 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1076 return var;
1077
1078 return error_mark_node;
1079 }
1080
1081
1082 /* Return the parallel region associated with STMT. */
1083
1084 /* Debugging dumps for parallel regions. */
1085 void dump_omp_region (FILE *, struct omp_region *, int);
1086 void debug_omp_region (struct omp_region *);
1087 void debug_all_omp_regions (void);
1088
1089 /* Dump the parallel region tree rooted at REGION. */
1090
1091 void
1092 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1093 {
1094 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1095 gimple_code_name[region->type]);
1096
1097 if (region->inner)
1098 dump_omp_region (file, region->inner, indent + 4);
1099
1100 if (region->cont)
1101 {
1102 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1103 region->cont->index);
1104 }
1105
1106 if (region->exit)
1107 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1108 region->exit->index);
1109 else
1110 fprintf (file, "%*s[no exit marker]\n", indent, "");
1111
1112 if (region->next)
1113 dump_omp_region (file, region->next, indent);
1114 }
1115
1116 DEBUG_FUNCTION void
1117 debug_omp_region (struct omp_region *region)
1118 {
1119 dump_omp_region (stderr, region, 0);
1120 }
1121
1122 DEBUG_FUNCTION void
1123 debug_all_omp_regions (void)
1124 {
1125 dump_omp_region (stderr, root_omp_region, 0);
1126 }
1127
1128
1129 /* Create a new parallel region starting at STMT inside region PARENT. */
1130
1131 struct omp_region *
1132 new_omp_region (basic_block bb, enum gimple_code type,
1133 struct omp_region *parent)
1134 {
1135 struct omp_region *region = XCNEW (struct omp_region);
1136
1137 region->outer = parent;
1138 region->entry = bb;
1139 region->type = type;
1140
1141 if (parent)
1142 {
1143 /* This is a nested region. Add it to the list of inner
1144 regions in PARENT. */
1145 region->next = parent->inner;
1146 parent->inner = region;
1147 }
1148 else
1149 {
1150 /* This is a toplevel region. Add it to the list of toplevel
1151 regions in ROOT_OMP_REGION. */
1152 region->next = root_omp_region;
1153 root_omp_region = region;
1154 }
1155
1156 return region;
1157 }
1158
1159 /* Release the memory associated with the region tree rooted at REGION. */
1160
1161 static void
1162 free_omp_region_1 (struct omp_region *region)
1163 {
1164 struct omp_region *i, *n;
1165
1166 for (i = region->inner; i ; i = n)
1167 {
1168 n = i->next;
1169 free_omp_region_1 (i);
1170 }
1171
1172 free (region);
1173 }
1174
1175 /* Release the memory for the entire omp region tree. */
1176
1177 void
1178 free_omp_regions (void)
1179 {
1180 struct omp_region *r, *n;
1181 for (r = root_omp_region; r ; r = n)
1182 {
1183 n = r->next;
1184 free_omp_region_1 (r);
1185 }
1186 root_omp_region = NULL;
1187 }
1188
1189
1190 /* Create a new context, with OUTER_CTX being the surrounding context. */
1191
1192 static omp_context *
1193 new_omp_context (gimple stmt, omp_context *outer_ctx)
1194 {
1195 omp_context *ctx = XCNEW (omp_context);
1196
1197 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1198 (splay_tree_value) ctx);
1199 ctx->stmt = stmt;
1200
1201 if (outer_ctx)
1202 {
1203 ctx->outer = outer_ctx;
1204 ctx->cb = outer_ctx->cb;
1205 ctx->cb.block = NULL;
1206 ctx->depth = outer_ctx->depth + 1;
1207 }
1208 else
1209 {
1210 ctx->cb.src_fn = current_function_decl;
1211 ctx->cb.dst_fn = current_function_decl;
1212 ctx->cb.src_node = cgraph_get_node (current_function_decl);
1213 gcc_checking_assert (ctx->cb.src_node);
1214 ctx->cb.dst_node = ctx->cb.src_node;
1215 ctx->cb.src_cfun = cfun;
1216 ctx->cb.copy_decl = omp_copy_decl;
1217 ctx->cb.eh_lp_nr = 0;
1218 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1219 ctx->depth = 1;
1220 }
1221
1222 ctx->cb.decl_map = pointer_map_create ();
1223
1224 return ctx;
1225 }
1226
1227 static gimple_seq maybe_catch_exception (gimple_seq);
1228
1229 /* Finalize task copyfn. */
1230
1231 static void
1232 finalize_task_copyfn (gimple task_stmt)
1233 {
1234 struct function *child_cfun;
1235 tree child_fn, old_fn;
1236 gimple_seq seq, new_seq;
1237 gimple bind;
1238
1239 child_fn = gimple_omp_task_copy_fn (task_stmt);
1240 if (child_fn == NULL_TREE)
1241 return;
1242
1243 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1244
1245 /* Inform the callgraph about the new function. */
1246 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1247 = cfun->curr_properties & ~PROP_loops;
1248
1249 old_fn = current_function_decl;
1250 push_cfun (child_cfun);
1251 current_function_decl = child_fn;
1252 bind = gimplify_body (child_fn, false);
1253 seq = gimple_seq_alloc ();
1254 gimple_seq_add_stmt (&seq, bind);
1255 new_seq = maybe_catch_exception (seq);
1256 if (new_seq != seq)
1257 {
1258 bind = gimple_build_bind (NULL, new_seq, NULL);
1259 seq = gimple_seq_alloc ();
1260 gimple_seq_add_stmt (&seq, bind);
1261 }
1262 gimple_set_body (child_fn, seq);
1263 pop_cfun ();
1264 current_function_decl = old_fn;
1265
1266 cgraph_add_new_function (child_fn, false);
1267 }
1268
1269 /* Destroy an omp_context data structure. Called through the splay tree
1270 value delete callback. */
1271
1272 static void
1273 delete_omp_context (splay_tree_value value)
1274 {
1275 omp_context *ctx = (omp_context *) value;
1276
1277 pointer_map_destroy (ctx->cb.decl_map);
1278
1279 if (ctx->field_map)
1280 splay_tree_delete (ctx->field_map);
1281 if (ctx->sfield_map)
1282 splay_tree_delete (ctx->sfield_map);
1283
1284 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it here,
1285 otherwise it produces corrupt debug information. */
1286 if (ctx->record_type)
1287 {
1288 tree t;
1289 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1290 DECL_ABSTRACT_ORIGIN (t) = NULL;
1291 }
1292 if (ctx->srecord_type)
1293 {
1294 tree t;
1295 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1296 DECL_ABSTRACT_ORIGIN (t) = NULL;
1297 }
1298
1299 if (is_task_ctx (ctx))
1300 finalize_task_copyfn (ctx->stmt);
1301
1302 XDELETE (ctx);
1303 }
1304
1305 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1306 context. */
1307
1308 static void
1309 fixup_child_record_type (omp_context *ctx)
1310 {
1311 tree f, type = ctx->record_type;
1312
1313 /* ??? It isn't sufficient to just call remap_type here, because
1314 variably_modified_type_p doesn't work the way we expect for
1315 record types. Testing each field for whether it needs remapping
1316 and creating a new record by hand works, however. */
1317 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1318 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1319 break;
1320 if (f)
1321 {
1322 tree name, new_fields = NULL;
1323
1324 type = lang_hooks.types.make_type (RECORD_TYPE);
1325 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1326 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1327 TYPE_DECL, name, type);
1328 TYPE_NAME (type) = name;
1329
1330 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1331 {
1332 tree new_f = copy_node (f);
1333 DECL_CONTEXT (new_f) = type;
1334 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1335 DECL_CHAIN (new_f) = new_fields;
1336 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1337 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1338 &ctx->cb, NULL);
1339 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1340 &ctx->cb, NULL);
1341 new_fields = new_f;
1342
1343 /* Arrange to be able to look up the receiver field
1344 given the sender field. */
1345 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1346 (splay_tree_value) new_f);
1347 }
1348 TYPE_FIELDS (type) = nreverse (new_fields);
1349 layout_type (type);
1350 }
1351
1352 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1353 }
1354
1355 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1356 specified by CLAUSES. */
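/* For instance (an illustrative sketch, not exhaustive), on

	#pragma omp parallel shared (s) firstprivate (f) private (p)

   S gets a field in the record plus a local replacement, F gets a field
   and a local private copy, while P only gets a local replacement, since
   it never travels through .omp_data_s.  */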
1357
1358 static void
1359 scan_sharing_clauses (tree clauses, omp_context *ctx)
1360 {
1361 tree c, decl;
1362 bool scan_array_reductions = false;
1363
1364 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1365 {
1366 bool by_ref;
1367
1368 switch (OMP_CLAUSE_CODE (c))
1369 {
1370 case OMP_CLAUSE_PRIVATE:
1371 decl = OMP_CLAUSE_DECL (c);
1372 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1373 goto do_private;
1374 else if (!is_variable_sized (decl))
1375 install_var_local (decl, ctx);
1376 break;
1377
1378 case OMP_CLAUSE_SHARED:
1379 gcc_assert (is_taskreg_ctx (ctx));
1380 decl = OMP_CLAUSE_DECL (c);
1381 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1382 || !is_variable_sized (decl));
1383 /* Global variables don't need to be copied,
1384 the receiver side will use them directly. */
1385 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1386 break;
1387 by_ref = use_pointer_for_field (decl, ctx);
1388 if (! TREE_READONLY (decl)
1389 || TREE_ADDRESSABLE (decl)
1390 || by_ref
1391 || is_reference (decl))
1392 {
1393 install_var_field (decl, by_ref, 3, ctx);
1394 install_var_local (decl, ctx);
1395 break;
1396 }
1397 /* We don't need to copy const scalar vars back. */
1398 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1399 goto do_private;
1400
1401 case OMP_CLAUSE_LASTPRIVATE:
1402 /* Let the corresponding firstprivate clause create
1403 the variable. */
1404 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1405 break;
1406 /* FALLTHRU */
1407
1408 case OMP_CLAUSE_FIRSTPRIVATE:
1409 case OMP_CLAUSE_REDUCTION:
1410 decl = OMP_CLAUSE_DECL (c);
1411 do_private:
1412 if (is_variable_sized (decl))
1413 {
1414 if (is_task_ctx (ctx))
1415 install_var_field (decl, false, 1, ctx);
1416 break;
1417 }
1418 else if (is_taskreg_ctx (ctx))
1419 {
1420 bool global
1421 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1422 by_ref = use_pointer_for_field (decl, NULL);
1423
1424 if (is_task_ctx (ctx)
1425 && (global || by_ref || is_reference (decl)))
1426 {
1427 install_var_field (decl, false, 1, ctx);
1428 if (!global)
1429 install_var_field (decl, by_ref, 2, ctx);
1430 }
1431 else if (!global)
1432 install_var_field (decl, by_ref, 3, ctx);
1433 }
1434 install_var_local (decl, ctx);
1435 break;
1436
1437 case OMP_CLAUSE_COPYPRIVATE:
1438 case OMP_CLAUSE_COPYIN:
1439 decl = OMP_CLAUSE_DECL (c);
1440 by_ref = use_pointer_for_field (decl, NULL);
1441 install_var_field (decl, by_ref, 3, ctx);
1442 break;
1443
1444 case OMP_CLAUSE_DEFAULT:
1445 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1446 break;
1447
1448 case OMP_CLAUSE_FINAL:
1449 case OMP_CLAUSE_IF:
1450 case OMP_CLAUSE_NUM_THREADS:
1451 case OMP_CLAUSE_SCHEDULE:
1452 if (ctx->outer)
1453 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1454 break;
1455
1456 case OMP_CLAUSE_NOWAIT:
1457 case OMP_CLAUSE_ORDERED:
1458 case OMP_CLAUSE_COLLAPSE:
1459 case OMP_CLAUSE_UNTIED:
1460 case OMP_CLAUSE_MERGEABLE:
1461 break;
1462
1463 default:
1464 gcc_unreachable ();
1465 }
1466 }
1467
1468 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1469 {
1470 switch (OMP_CLAUSE_CODE (c))
1471 {
1472 case OMP_CLAUSE_LASTPRIVATE:
1473 /* Let the corresponding firstprivate clause create
1474 the variable. */
1475 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1476 scan_array_reductions = true;
1477 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1478 break;
1479 /* FALLTHRU */
1480
1481 case OMP_CLAUSE_PRIVATE:
1482 case OMP_CLAUSE_FIRSTPRIVATE:
1483 case OMP_CLAUSE_REDUCTION:
1484 decl = OMP_CLAUSE_DECL (c);
1485 if (is_variable_sized (decl))
1486 install_var_local (decl, ctx);
1487 fixup_remapped_decl (decl, ctx,
1488 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1489 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1490 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1491 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1492 scan_array_reductions = true;
1493 break;
1494
1495 case OMP_CLAUSE_SHARED:
1496 decl = OMP_CLAUSE_DECL (c);
1497 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1498 fixup_remapped_decl (decl, ctx, false);
1499 break;
1500
1501 case OMP_CLAUSE_COPYPRIVATE:
1502 case OMP_CLAUSE_COPYIN:
1503 case OMP_CLAUSE_DEFAULT:
1504 case OMP_CLAUSE_IF:
1505 case OMP_CLAUSE_NUM_THREADS:
1506 case OMP_CLAUSE_SCHEDULE:
1507 case OMP_CLAUSE_NOWAIT:
1508 case OMP_CLAUSE_ORDERED:
1509 case OMP_CLAUSE_COLLAPSE:
1510 case OMP_CLAUSE_UNTIED:
1511 case OMP_CLAUSE_FINAL:
1512 case OMP_CLAUSE_MERGEABLE:
1513 break;
1514
1515 default:
1516 gcc_unreachable ();
1517 }
1518 }
1519
1520 if (scan_array_reductions)
1521 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1522 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1523 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1524 {
1525 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1526 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1527 }
1528 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1529 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1530 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1531 }
1532
1533 /* Create a new name for omp child function. Returns an identifier. */
1534
1535 static GTY(()) unsigned int tmp_ompfn_id_num;
1536
1537 static tree
1538 create_omp_child_function_name (bool task_copy)
1539 {
1540 return (clone_function_name (current_function_decl,
1541 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1542 }
1543
1544 /* Build a decl for the omp child function. It'll not contain a body
1545 yet, just the bare decl. */
1546
1547 static void
1548 create_omp_child_function (omp_context *ctx, bool task_copy)
1549 {
1550 tree decl, type, name, t;
1551
1552 name = create_omp_child_function_name (task_copy);
1553 if (task_copy)
1554 type = build_function_type_list (void_type_node, ptr_type_node,
1555 ptr_type_node, NULL_TREE);
1556 else
1557 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1558
1559 decl = build_decl (gimple_location (ctx->stmt),
1560 FUNCTION_DECL, name, type);
1561
1562 if (!task_copy)
1563 ctx->cb.dst_fn = decl;
1564 else
1565 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1566
1567 TREE_STATIC (decl) = 1;
1568 TREE_USED (decl) = 1;
1569 DECL_ARTIFICIAL (decl) = 1;
1570 DECL_NAMELESS (decl) = 1;
1571 DECL_IGNORED_P (decl) = 0;
1572 TREE_PUBLIC (decl) = 0;
1573 DECL_UNINLINABLE (decl) = 1;
1574 DECL_EXTERNAL (decl) = 0;
1575 DECL_CONTEXT (decl) = NULL_TREE;
1576 DECL_INITIAL (decl) = make_node (BLOCK);
1577
1578 t = build_decl (DECL_SOURCE_LOCATION (decl),
1579 RESULT_DECL, NULL_TREE, void_type_node);
1580 DECL_ARTIFICIAL (t) = 1;
1581 DECL_IGNORED_P (t) = 1;
1582 DECL_CONTEXT (t) = decl;
1583 DECL_RESULT (decl) = t;
1584
1585 t = build_decl (DECL_SOURCE_LOCATION (decl),
1586 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1587 DECL_ARTIFICIAL (t) = 1;
1588 DECL_NAMELESS (t) = 1;
1589 DECL_ARG_TYPE (t) = ptr_type_node;
1590 DECL_CONTEXT (t) = current_function_decl;
1591 TREE_USED (t) = 1;
1592 DECL_ARGUMENTS (decl) = t;
1593 if (!task_copy)
1594 ctx->receiver_decl = t;
1595 else
1596 {
1597 t = build_decl (DECL_SOURCE_LOCATION (decl),
1598 PARM_DECL, get_identifier (".omp_data_o"),
1599 ptr_type_node);
1600 DECL_ARTIFICIAL (t) = 1;
1601 DECL_NAMELESS (t) = 1;
1602 DECL_ARG_TYPE (t) = ptr_type_node;
1603 DECL_CONTEXT (t) = current_function_decl;
1604 TREE_USED (t) = 1;
1605 TREE_ADDRESSABLE (t) = 1;
1606 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1607 DECL_ARGUMENTS (decl) = t;
1608 }
1609
1610 /* Allocate memory for the function structure. The call to
1611 push_struct_function clobbers CFUN, so we need to restore
1612 it afterward. */
1613 push_struct_function (decl);
1614 cfun->function_end_locus = gimple_location (ctx->stmt);
1615 pop_cfun ();
1616 }
1617
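/* Illustratively, for a parallel region in function foo the decl built
   here is roughly

	static void foo._omp_fn.0 (void *.omp_data_i);

   and for a task copy function

	static void foo._omp_cpyfn.1 (void *.omp_data_o, void *.omp_data_i);

   The names come from clone_function_name, so the numeric suffixes vary;
   the .omp_data_i parameter type is later narrowed to a pointer to the
   remapped record by fixup_child_record_type.  */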
1618
1619 /* Scan an OpenMP parallel directive. */
1620
1621 static void
1622 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1623 {
1624 omp_context *ctx;
1625 tree name;
1626 gimple stmt = gsi_stmt (*gsi);
1627
1628 /* Ignore parallel directives with empty bodies, unless there
1629 are copyin clauses. */
1630 if (optimize > 0
1631 && empty_body_p (gimple_omp_body (stmt))
1632 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1633 OMP_CLAUSE_COPYIN) == NULL)
1634 {
1635 gsi_replace (gsi, gimple_build_nop (), false);
1636 return;
1637 }
1638
1639 ctx = new_omp_context (stmt, outer_ctx);
1640 if (taskreg_nesting_level > 1)
1641 ctx->is_nested = true;
1642 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1643 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1644 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1645 name = create_tmp_var_name (".omp_data_s");
1646 name = build_decl (gimple_location (stmt),
1647 TYPE_DECL, name, ctx->record_type);
1648 DECL_ARTIFICIAL (name) = 1;
1649 DECL_NAMELESS (name) = 1;
1650 TYPE_NAME (ctx->record_type) = name;
1651 create_omp_child_function (ctx, false);
1652 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1653
1654 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1655 scan_omp (gimple_omp_body (stmt), ctx);
1656
1657 if (TYPE_FIELDS (ctx->record_type) == NULL)
1658 ctx->record_type = ctx->receiver_decl = NULL;
1659 else
1660 {
1661 layout_type (ctx->record_type);
1662 fixup_child_record_type (ctx);
1663 }
1664 }
1665
1666 /* Scan an OpenMP task directive. */
1667
1668 static void
1669 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1670 {
1671 omp_context *ctx;
1672 tree name, t;
1673 gimple stmt = gsi_stmt (*gsi);
1674 location_t loc = gimple_location (stmt);
1675
1676 /* Ignore task directives with empty bodies. */
1677 if (optimize > 0
1678 && empty_body_p (gimple_omp_body (stmt)))
1679 {
1680 gsi_replace (gsi, gimple_build_nop (), false);
1681 return;
1682 }
1683
1684 ctx = new_omp_context (stmt, outer_ctx);
1685 if (taskreg_nesting_level > 1)
1686 ctx->is_nested = true;
1687 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1688 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1689 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1690 name = create_tmp_var_name (".omp_data_s");
1691 name = build_decl (gimple_location (stmt),
1692 TYPE_DECL, name, ctx->record_type);
1693 DECL_ARTIFICIAL (name) = 1;
1694 DECL_NAMELESS (name) = 1;
1695 TYPE_NAME (ctx->record_type) = name;
1696 create_omp_child_function (ctx, false);
1697 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1698
1699 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1700
1701 if (ctx->srecord_type)
1702 {
1703 name = create_tmp_var_name (".omp_data_a");
1704 name = build_decl (gimple_location (stmt),
1705 TYPE_DECL, name, ctx->srecord_type);
1706 DECL_ARTIFICIAL (name) = 1;
1707 DECL_NAMELESS (name) = 1;
1708 TYPE_NAME (ctx->srecord_type) = name;
1709 create_omp_child_function (ctx, true);
1710 }
1711
1712 scan_omp (gimple_omp_body (stmt), ctx);
1713
1714 if (TYPE_FIELDS (ctx->record_type) == NULL)
1715 {
1716 ctx->record_type = ctx->receiver_decl = NULL;
1717 t = build_int_cst (long_integer_type_node, 0);
1718 gimple_omp_task_set_arg_size (stmt, t);
1719 t = build_int_cst (long_integer_type_node, 1);
1720 gimple_omp_task_set_arg_align (stmt, t);
1721 }
1722 else
1723 {
1724 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1725 /* Move VLA fields to the end. */
1726 p = &TYPE_FIELDS (ctx->record_type);
1727 while (*p)
1728 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1729 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1730 {
1731 *q = *p;
1732 *p = TREE_CHAIN (*p);
1733 TREE_CHAIN (*q) = NULL_TREE;
1734 q = &TREE_CHAIN (*q);
1735 }
1736 else
1737 p = &DECL_CHAIN (*p);
1738 *p = vla_fields;
1739 layout_type (ctx->record_type);
1740 fixup_child_record_type (ctx);
1741 if (ctx->srecord_type)
1742 layout_type (ctx->srecord_type);
1743 t = fold_convert_loc (loc, long_integer_type_node,
1744 TYPE_SIZE_UNIT (ctx->record_type));
1745 gimple_omp_task_set_arg_size (stmt, t);
1746 t = build_int_cst (long_integer_type_node,
1747 TYPE_ALIGN_UNIT (ctx->record_type));
1748 gimple_omp_task_set_arg_align (stmt, t);
1749 }
1750 }
1751
1752
1753 /* Scan an OpenMP loop directive. */
1754
1755 static void
1756 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1757 {
1758 omp_context *ctx;
1759 size_t i;
1760
1761 ctx = new_omp_context (stmt, outer_ctx);
1762
1763 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1764
1765 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1766 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1767 {
1768 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1769 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1770 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1771 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1772 }
1773 scan_omp (gimple_omp_body (stmt), ctx);
1774 }
1775
1776 /* Scan an OpenMP sections directive. */
1777
1778 static void
1779 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1780 {
1781 omp_context *ctx;
1782
1783 ctx = new_omp_context (stmt, outer_ctx);
1784 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1785 scan_omp (gimple_omp_body (stmt), ctx);
1786 }
1787
1788 /* Scan an OpenMP single directive. */
1789
1790 static void
1791 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1792 {
1793 omp_context *ctx;
1794 tree name;
1795
1796 ctx = new_omp_context (stmt, outer_ctx);
1797 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1798 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1799 name = create_tmp_var_name (".omp_copy_s");
1800 name = build_decl (gimple_location (stmt),
1801 TYPE_DECL, name, ctx->record_type);
1802 TYPE_NAME (ctx->record_type) = name;
1803
1804 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1805 scan_omp (gimple_omp_body (stmt), ctx);
1806
1807 if (TYPE_FIELDS (ctx->record_type) == NULL)
1808 ctx->record_type = NULL;
1809 else
1810 layout_type (ctx->record_type);
1811 }
1812
1813
1814 /* Check OpenMP nesting restrictions. */
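/* For example, this (illustrative) nesting triggers the work-sharing
   warning below, because the inner loop is closely nested inside another
   work-sharing region with no intervening parallel region:

	#pragma omp for
	for (i = 0; i < n; i++)
	  {
	#pragma omp for
	    for (j = 0; j < n; j++)
	      body (i, j);
	  }
   */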
1815 static void
1816 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1817 {
1818 switch (gimple_code (stmt))
1819 {
1820 case GIMPLE_OMP_FOR:
1821 case GIMPLE_OMP_SECTIONS:
1822 case GIMPLE_OMP_SINGLE:
1823 case GIMPLE_CALL:
1824 for (; ctx != NULL; ctx = ctx->outer)
1825 switch (gimple_code (ctx->stmt))
1826 {
1827 case GIMPLE_OMP_FOR:
1828 case GIMPLE_OMP_SECTIONS:
1829 case GIMPLE_OMP_SINGLE:
1830 case GIMPLE_OMP_ORDERED:
1831 case GIMPLE_OMP_MASTER:
1832 case GIMPLE_OMP_TASK:
1833 if (is_gimple_call (stmt))
1834 {
1835 warning (0, "barrier region may not be closely nested inside "
1836 "of work-sharing, critical, ordered, master or "
1837 "explicit task region");
1838 return;
1839 }
1840 warning (0, "work-sharing region may not be closely nested inside "
1841 "of work-sharing, critical, ordered, master or explicit "
1842 "task region");
1843 return;
1844 case GIMPLE_OMP_PARALLEL:
1845 return;
1846 default:
1847 break;
1848 }
1849 break;
1850 case GIMPLE_OMP_MASTER:
1851 for (; ctx != NULL; ctx = ctx->outer)
1852 switch (gimple_code (ctx->stmt))
1853 {
1854 case GIMPLE_OMP_FOR:
1855 case GIMPLE_OMP_SECTIONS:
1856 case GIMPLE_OMP_SINGLE:
1857 case GIMPLE_OMP_TASK:
1858 warning (0, "master region may not be closely nested inside "
1859 "of work-sharing or explicit task region");
1860 return;
1861 case GIMPLE_OMP_PARALLEL:
1862 return;
1863 default:
1864 break;
1865 }
1866 break;
1867 case GIMPLE_OMP_ORDERED:
1868 for (; ctx != NULL; ctx = ctx->outer)
1869 switch (gimple_code (ctx->stmt))
1870 {
1871 case GIMPLE_OMP_CRITICAL:
1872 case GIMPLE_OMP_TASK:
1873 warning (0, "ordered region may not be closely nested inside "
1874 "of critical or explicit task region");
1875 return;
1876 case GIMPLE_OMP_FOR:
1877 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1878 OMP_CLAUSE_ORDERED) == NULL)
1879 warning (0, "ordered region must be closely nested inside "
1880 "a loop region with an ordered clause");
1881 return;
1882 case GIMPLE_OMP_PARALLEL:
1883 return;
1884 default:
1885 break;
1886 }
1887 break;
1888 case GIMPLE_OMP_CRITICAL:
1889 for (; ctx != NULL; ctx = ctx->outer)
1890 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1891 && (gimple_omp_critical_name (stmt)
1892 == gimple_omp_critical_name (ctx->stmt)))
1893 {
1894 warning (0, "critical region may not be nested inside a critical "
1895 "region with the same name");
1896 return;
1897 }
1898 break;
1899 default:
1900 break;
1901 }
1902 }
1903
1904
1905 /* Helper function for scan_omp.
1906
1907 Callback for walk_tree or operands in walk_gimple_stmt, used to
1908 scan for OpenMP directives in TP. */
1909
1910 static tree
1911 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1912 {
1913 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1914 omp_context *ctx = (omp_context *) wi->info;
1915 tree t = *tp;
1916
1917 switch (TREE_CODE (t))
1918 {
1919 case VAR_DECL:
1920 case PARM_DECL:
1921 case LABEL_DECL:
1922 case RESULT_DECL:
1923 if (ctx)
1924 *tp = remap_decl (t, &ctx->cb);
1925 break;
1926
1927 default:
1928 if (ctx && TYPE_P (t))
1929 *tp = remap_type (t, &ctx->cb);
1930 else if (!DECL_P (t))
1931 {
1932 *walk_subtrees = 1;
1933 if (ctx)
1934 {
1935 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1936 if (tem != TREE_TYPE (t))
1937 {
1938 if (TREE_CODE (t) == INTEGER_CST)
1939 *tp = build_int_cst_wide (tem,
1940 TREE_INT_CST_LOW (t),
1941 TREE_INT_CST_HIGH (t));
1942 else
1943 TREE_TYPE (t) = tem;
1944 }
1945 }
1946 }
1947 break;
1948 }
1949
1950 return NULL_TREE;
1951 }
1952
1953
1954 /* Helper function for scan_omp.
1955
1956 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1957 the current statement in GSI. */
1958
1959 static tree
1960 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1961 struct walk_stmt_info *wi)
1962 {
1963 gimple stmt = gsi_stmt (*gsi);
1964 omp_context *ctx = (omp_context *) wi->info;
1965
1966 if (gimple_has_location (stmt))
1967 input_location = gimple_location (stmt);
1968
1969 /* Check the OpenMP nesting restrictions. */
1970 if (ctx != NULL)
1971 {
1972 if (is_gimple_omp (stmt))
1973 check_omp_nesting_restrictions (stmt, ctx);
1974 else if (is_gimple_call (stmt))
1975 {
1976 tree fndecl = gimple_call_fndecl (stmt);
1977 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1978 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1979 check_omp_nesting_restrictions (stmt, ctx);
1980 }
1981 }
1982
1983 *handled_ops_p = true;
1984
1985 switch (gimple_code (stmt))
1986 {
1987 case GIMPLE_OMP_PARALLEL:
1988 taskreg_nesting_level++;
1989 scan_omp_parallel (gsi, ctx);
1990 taskreg_nesting_level--;
1991 break;
1992
1993 case GIMPLE_OMP_TASK:
1994 taskreg_nesting_level++;
1995 scan_omp_task (gsi, ctx);
1996 taskreg_nesting_level--;
1997 break;
1998
1999 case GIMPLE_OMP_FOR:
2000 scan_omp_for (stmt, ctx);
2001 break;
2002
2003 case GIMPLE_OMP_SECTIONS:
2004 scan_omp_sections (stmt, ctx);
2005 break;
2006
2007 case GIMPLE_OMP_SINGLE:
2008 scan_omp_single (stmt, ctx);
2009 break;
2010
2011 case GIMPLE_OMP_SECTION:
2012 case GIMPLE_OMP_MASTER:
2013 case GIMPLE_OMP_ORDERED:
2014 case GIMPLE_OMP_CRITICAL:
2015 ctx = new_omp_context (stmt, ctx);
2016 scan_omp (gimple_omp_body (stmt), ctx);
2017 break;
2018
2019 case GIMPLE_BIND:
2020 {
2021 tree var;
2022
2023 *handled_ops_p = false;
2024 if (ctx)
2025 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2026 insert_decl_map (&ctx->cb, var, var);
2027 }
2028 break;
2029 default:
2030 *handled_ops_p = false;
2031 break;
2032 }
2033
2034 return NULL_TREE;
2035 }
2036
2037
2038 /* Scan all the statements starting at the current statement. CTX
2039 contains context information about the OpenMP directives and
2040 clauses found during the scan. */
2041
2042 static void
2043 scan_omp (gimple_seq body, omp_context *ctx)
2044 {
2045 location_t saved_location;
2046 struct walk_stmt_info wi;
2047
2048 memset (&wi, 0, sizeof (wi));
2049 wi.info = ctx;
2050 wi.want_locations = true;
2051
2052 saved_location = input_location;
2053 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2054 input_location = saved_location;
2055 }
2056 \f
2057 /* Re-gimplification and code generation routines. */
2058
2059 /* Build a call to GOMP_barrier. */
2060
2061 static tree
2062 build_omp_barrier (void)
2063 {
2064 return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
2065 }
2066
2067 /* If a context was created for STMT when it was scanned, return it. */
2068
2069 static omp_context *
2070 maybe_lookup_ctx (gimple stmt)
2071 {
2072 splay_tree_node n;
2073 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2074 return n ? (omp_context *) n->value : NULL;
2075 }
2076
2077
2078 /* Find the mapping for DECL in CTX or the immediately enclosing
2079 context that has a mapping for DECL.
2080
2081 If CTX is a nested parallel directive, we may have to use the decl
2082 mappings created in CTX's parent context. Suppose that we have the
2083 following parallel nesting (variable UIDs shown for clarity):
2084
2085 iD.1562 = 0;
2086 #omp parallel shared(iD.1562) -> outer parallel
2087 iD.1562 = iD.1562 + 1;
2088
2089 #omp parallel shared (iD.1562) -> inner parallel
2090 iD.1562 = iD.1562 - 1;
2091
2092 Each parallel structure will create a distinct .omp_data_s structure
2093 for copying iD.1562 in/out of the directive:
2094
2095 outer parallel .omp_data_s.1.i -> iD.1562
2096 inner parallel .omp_data_s.2.i -> iD.1562
2097
2098 A shared variable mapping will produce a copy-out operation before
2099 the parallel directive and a copy-in operation after it. So, in
2100 this case we would have:
2101
2102 iD.1562 = 0;
2103 .omp_data_o.1.i = iD.1562;
2104 #omp parallel shared(iD.1562) -> outer parallel
2105 .omp_data_i.1 = &.omp_data_o.1
2106 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2107
2108 .omp_data_o.2.i = iD.1562; -> **
2109 #omp parallel shared(iD.1562) -> inner parallel
2110 .omp_data_i.2 = &.omp_data_o.2
2111 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2112
2113
2114 ** This is a problem. The symbol iD.1562 cannot be referenced
2115 inside the body of the outer parallel region. But since we are
2116 emitting this copy operation while expanding the inner parallel
2117 directive, we need to access the CTX structure of the outer
2118 parallel directive to get the correct mapping:
2119
2120 .omp_data_o.2.i = .omp_data_i.1->i
2121
2122 Since there may be other workshare or parallel directives enclosing
2123 the parallel directive, it may be necessary to walk up the context
2124 parent chain. This is not a problem in general because nested
2125 parallelism happens only rarely. */
2126
2127 static tree
2128 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2129 {
2130 tree t;
2131 omp_context *up;
2132
2133 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2134 t = maybe_lookup_decl (decl, up);
2135
2136 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2137
2138 return t ? t : decl;
2139 }
2140
2141
2142 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2143 in outer contexts. */
2144
2145 static tree
2146 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2147 {
2148 tree t = NULL;
2149 omp_context *up;
2150
2151 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2152 t = maybe_lookup_decl (decl, up);
2153
2154 return t ? t : decl;
2155 }
2156
2157
2158 /* Construct the initialization value for reduction CLAUSE. */
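     /* For illustration: reduction (+:x), (-:x), (|:x), (^:x), (||:x)
        initialize the private copy to 0; (*:x) and (&&:x) to 1; (&:x)
        to ~0; (max:x) to the smallest value of the type (-Inf for
        floats honoring infinities) and (min:x) to the largest.  */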
2159
2160 tree
2161 omp_reduction_init (tree clause, tree type)
2162 {
2163 location_t loc = OMP_CLAUSE_LOCATION (clause);
2164 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2165 {
2166 case PLUS_EXPR:
2167 case MINUS_EXPR:
2168 case BIT_IOR_EXPR:
2169 case BIT_XOR_EXPR:
2170 case TRUTH_OR_EXPR:
2171 case TRUTH_ORIF_EXPR:
2172 case TRUTH_XOR_EXPR:
2173 case NE_EXPR:
2174 return build_zero_cst (type);
2175
2176 case MULT_EXPR:
2177 case TRUTH_AND_EXPR:
2178 case TRUTH_ANDIF_EXPR:
2179 case EQ_EXPR:
2180 return fold_convert_loc (loc, type, integer_one_node);
2181
2182 case BIT_AND_EXPR:
2183 return fold_convert_loc (loc, type, integer_minus_one_node);
2184
2185 case MAX_EXPR:
2186 if (SCALAR_FLOAT_TYPE_P (type))
2187 {
2188 REAL_VALUE_TYPE max, min;
2189 if (HONOR_INFINITIES (TYPE_MODE (type)))
2190 {
2191 real_inf (&max);
2192 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2193 }
2194 else
2195 real_maxval (&min, 1, TYPE_MODE (type));
2196 return build_real (type, min);
2197 }
2198 else
2199 {
2200 gcc_assert (INTEGRAL_TYPE_P (type));
2201 return TYPE_MIN_VALUE (type);
2202 }
2203
2204 case MIN_EXPR:
2205 if (SCALAR_FLOAT_TYPE_P (type))
2206 {
2207 REAL_VALUE_TYPE max;
2208 if (HONOR_INFINITIES (TYPE_MODE (type)))
2209 real_inf (&max);
2210 else
2211 real_maxval (&max, 0, TYPE_MODE (type));
2212 return build_real (type, max);
2213 }
2214 else
2215 {
2216 gcc_assert (INTEGRAL_TYPE_P (type));
2217 return TYPE_MAX_VALUE (type);
2218 }
2219
2220 default:
2221 gcc_unreachable ();
2222 }
2223 }
2224
2225 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2226 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2227 private variables. Initialization statements go in ILIST, while calls
2228 to destructors go in DLIST. */
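     /* As a rough sketch: for '#pragma omp parallel firstprivate (x)'
        the receiver side conceptually gets

            x.1 = .omp_data_i->x;

        appended to ILIST, while DLIST collects any matching destructor
        calls emitted by the language hooks.  */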
2229
2230 static void
2231 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2232 omp_context *ctx)
2233 {
2234 gimple_stmt_iterator diter;
2235 tree c, dtor, copyin_seq, x, ptr;
2236 bool copyin_by_ref = false;
2237 bool lastprivate_firstprivate = false;
2238 int pass;
2239
2240 *dlist = gimple_seq_alloc ();
2241 diter = gsi_start (*dlist);
2242 copyin_seq = NULL;
2243
2244 /* Do all the fixed sized types in the first pass, and the variable sized
2245 types in the second pass. This makes sure that the scalar arguments to
2246 the variable sized types are processed before we use them in the
2247 variable sized operations. */
2248 for (pass = 0; pass < 2; ++pass)
2249 {
2250 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2251 {
2252 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2253 tree var, new_var;
2254 bool by_ref;
2255 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2256
2257 switch (c_kind)
2258 {
2259 case OMP_CLAUSE_PRIVATE:
2260 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2261 continue;
2262 break;
2263 case OMP_CLAUSE_SHARED:
2264 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2265 {
2266 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2267 continue;
2268 }
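     /* FALLTHRU */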
2269 case OMP_CLAUSE_FIRSTPRIVATE:
2270 case OMP_CLAUSE_COPYIN:
2271 case OMP_CLAUSE_REDUCTION:
2272 break;
2273 case OMP_CLAUSE_LASTPRIVATE:
2274 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2275 {
2276 lastprivate_firstprivate = true;
2277 if (pass != 0)
2278 continue;
2279 }
2280 break;
2281 default:
2282 continue;
2283 }
2284
2285 new_var = var = OMP_CLAUSE_DECL (c);
2286 if (c_kind != OMP_CLAUSE_COPYIN)
2287 new_var = lookup_decl (var, ctx);
2288
2289 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2290 {
2291 if (pass != 0)
2292 continue;
2293 }
2294 else if (is_variable_sized (var))
2295 {
2296 /* For variable sized types, we need to allocate the
2297 actual storage here. Call alloca and store the
2298 result in the pointer decl that we created elsewhere. */
2299 if (pass == 0)
2300 continue;
2301
2302 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2303 {
2304 gimple stmt;
2305 tree tmp, atmp;
2306
2307 ptr = DECL_VALUE_EXPR (new_var);
2308 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2309 ptr = TREE_OPERAND (ptr, 0);
2310 gcc_assert (DECL_P (ptr));
2311 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2312
2313 /* void *tmp = __builtin_alloca (size); */
2314 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2315 stmt = gimple_build_call (atmp, 1, x);
2316 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2317 gimple_add_tmp_var (tmp);
2318 gimple_call_set_lhs (stmt, tmp);
2319
2320 gimple_seq_add_stmt (ilist, stmt);
2321
2322 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2323 gimplify_assign (ptr, x, ilist);
2324 }
2325 }
2326 else if (is_reference (var))
2327 {
2328 /* For references that are being privatized for Fortran,
2329 allocate new backing storage for the new pointer
2330 variable. This allows us to avoid changing all the
2331 code that expects a pointer to something that expects
2332 a direct variable. Note that this doesn't apply to
2333 C++, since reference types are disallowed in data
2334 sharing clauses there, except for NRV optimized
2335 return values. */
2336 if (pass == 0)
2337 continue;
2338
2339 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2340 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2341 {
2342 x = build_receiver_ref (var, false, ctx);
2343 x = build_fold_addr_expr_loc (clause_loc, x);
2344 }
2345 else if (TREE_CONSTANT (x))
2346 {
2347 const char *name = NULL;
2348 if (DECL_NAME (var))
2349 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2350
2351 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2352 name);
2353 gimple_add_tmp_var (x);
2354 TREE_ADDRESSABLE (x) = 1;
2355 x = build_fold_addr_expr_loc (clause_loc, x);
2356 }
2357 else
2358 {
2359 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2360 x = build_call_expr_loc (clause_loc, atmp, 1, x);
2361 }
2362
2363 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2364 gimplify_assign (new_var, x, ilist);
2365
2366 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2367 }
2368 else if (c_kind == OMP_CLAUSE_REDUCTION
2369 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2370 {
2371 if (pass == 0)
2372 continue;
2373 }
2374 else if (pass != 0)
2375 continue;
2376
2377 switch (OMP_CLAUSE_CODE (c))
2378 {
2379 case OMP_CLAUSE_SHARED:
2380 /* Shared global vars are just accessed directly. */
2381 if (is_global_var (new_var))
2382 break;
2383 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2384 needs to be delayed until after fixup_child_record_type so
2385 that we get the correct type during the dereference. */
2386 by_ref = use_pointer_for_field (var, ctx);
2387 x = build_receiver_ref (var, by_ref, ctx);
2388 SET_DECL_VALUE_EXPR (new_var, x);
2389 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2390
2391 /* ??? If VAR is not passed by reference, and the variable
2392 hasn't been initialized yet, then we'll get a warning for
2393 the store into the omp_data_s structure. Ideally, we'd be
2394 able to notice this and not store anything at all, but
2395 we're generating code too early. Suppress the warning. */
2396 if (!by_ref)
2397 TREE_NO_WARNING (var) = 1;
2398 break;
2399
2400 case OMP_CLAUSE_LASTPRIVATE:
2401 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2402 break;
2403 /* FALLTHRU */
2404
2405 case OMP_CLAUSE_PRIVATE:
2406 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2407 x = build_outer_var_ref (var, ctx);
2408 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2409 {
2410 if (is_task_ctx (ctx))
2411 x = build_receiver_ref (var, false, ctx);
2412 else
2413 x = build_outer_var_ref (var, ctx);
2414 }
2415 else
2416 x = NULL;
2417 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2418 if (x)
2419 gimplify_and_add (x, ilist);
2420 /* FALLTHRU */
2421
2422 do_dtor:
2423 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2424 if (x)
2425 {
2426 gimple_seq tseq = NULL;
2427
2428 dtor = x;
2429 gimplify_stmt (&dtor, &tseq);
2430 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2431 }
2432 break;
2433
2434 case OMP_CLAUSE_FIRSTPRIVATE:
2435 if (is_task_ctx (ctx))
2436 {
2437 if (is_reference (var) || is_variable_sized (var))
2438 goto do_dtor;
2439 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2440 ctx))
2441 || use_pointer_for_field (var, NULL))
2442 {
2443 x = build_receiver_ref (var, false, ctx);
2444 SET_DECL_VALUE_EXPR (new_var, x);
2445 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2446 goto do_dtor;
2447 }
2448 }
2449 x = build_outer_var_ref (var, ctx);
2450 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2451 gimplify_and_add (x, ilist);
2452 goto do_dtor;
2453 break;
2454
2455 case OMP_CLAUSE_COPYIN:
2456 by_ref = use_pointer_for_field (var, NULL);
2457 x = build_receiver_ref (var, by_ref, ctx);
2458 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2459 append_to_statement_list (x, &copyin_seq);
2460 copyin_by_ref |= by_ref;
2461 break;
2462
2463 case OMP_CLAUSE_REDUCTION:
2464 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2465 {
2466 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2467 x = build_outer_var_ref (var, ctx);
2468
2469 if (is_reference (var))
2470 x = build_fold_addr_expr_loc (clause_loc, x);
2471 SET_DECL_VALUE_EXPR (placeholder, x);
2472 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2473 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2474 gimple_seq_add_seq (ilist,
2475 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2476 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2477 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2478 }
2479 else
2480 {
2481 x = omp_reduction_init (c, TREE_TYPE (new_var));
2482 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2483 gimplify_assign (new_var, x, ilist);
2484 }
2485 break;
2486
2487 default:
2488 gcc_unreachable ();
2489 }
2490 }
2491 }
2492
2493 /* The copyin sequence is not to be executed by the main thread, since
2494 that would result in self-copies. Perhaps not visible to scalars,
2495 but it certainly is to C++ operator=. */
2496 if (copyin_seq)
2497 {
2498 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2499 0);
2500 x = build2 (NE_EXPR, boolean_type_node, x,
2501 build_int_cst (TREE_TYPE (x), 0));
2502 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2503 gimplify_and_add (x, ilist);
2504 }
2505
2506 /* If any copyin variable is passed by reference, we must ensure the
2507 master thread doesn't modify it before it is copied over in all
2508 threads. Similarly for variables in both firstprivate and
2509 lastprivate clauses we need to ensure the lastprivate copying
2510 happens after firstprivate copying in all threads. */
2511 if (copyin_by_ref || lastprivate_firstprivate)
2512 gimplify_and_add (build_omp_barrier (), ilist);
2513 }
2514
2515
2516 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2517 both parallel and workshare constructs. PREDICATE may be NULL if it's
2518 always true. */
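     /* Conceptually the emitted code is

            if (PREDICATE) goto lab_true; else goto lab_done;
          lab_true:
            <original var> = <private copy>;      (one per clause)
          lab_done:

        where PREDICATE typically tests that this thread executed the
        sequentially last iteration or section.  */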
2519
2520 static void
2521 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2522 omp_context *ctx)
2523 {
2524 tree x, c, label = NULL;
2525 bool par_clauses = false;
2526
2527 /* Early exit if there are no lastprivate clauses. */
2528 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2529 if (clauses == NULL)
2530 {
2531 /* If this was a workshare clause, see if it had been combined
2532 with its parallel. In that case, look for the clauses on the
2533 parallel statement itself. */
2534 if (is_parallel_ctx (ctx))
2535 return;
2536
2537 ctx = ctx->outer;
2538 if (ctx == NULL || !is_parallel_ctx (ctx))
2539 return;
2540
2541 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2542 OMP_CLAUSE_LASTPRIVATE);
2543 if (clauses == NULL)
2544 return;
2545 par_clauses = true;
2546 }
2547
2548 if (predicate)
2549 {
2550 gimple stmt;
2551 tree label_true, arm1, arm2;
2552
2553 label = create_artificial_label (UNKNOWN_LOCATION);
2554 label_true = create_artificial_label (UNKNOWN_LOCATION);
2555 arm1 = TREE_OPERAND (predicate, 0);
2556 arm2 = TREE_OPERAND (predicate, 1);
2557 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2558 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2559 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2560 label_true, label);
2561 gimple_seq_add_stmt (stmt_list, stmt);
2562 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2563 }
2564
2565 for (c = clauses; c ;)
2566 {
2567 tree var, new_var;
2568 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2569
2570 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2571 {
2572 var = OMP_CLAUSE_DECL (c);
2573 new_var = lookup_decl (var, ctx);
2574
2575 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2576 {
2577 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2578 gimple_seq_add_seq (stmt_list,
2579 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2580 }
2581 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2582
2583 x = build_outer_var_ref (var, ctx);
2584 if (is_reference (var))
2585 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2586 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2587 gimplify_and_add (x, stmt_list);
2588 }
2589 c = OMP_CLAUSE_CHAIN (c);
2590 if (c == NULL && !par_clauses)
2591 {
2592 /* If this was a workshare clause, see if it had been combined
2593 with its parallel. In that case, continue looking for the
2594 clauses also on the parallel statement itself. */
2595 if (is_parallel_ctx (ctx))
2596 break;
2597
2598 ctx = ctx->outer;
2599 if (ctx == NULL || !is_parallel_ctx (ctx))
2600 break;
2601
2602 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2603 OMP_CLAUSE_LASTPRIVATE);
2604 par_clauses = true;
2605 }
2606 }
2607
2608 if (label)
2609 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2610 }
2611
2612
2613 /* Generate code to implement the REDUCTION clauses. */
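     /* Conceptually: with exactly one scalar reduction clause the merge
        is a single atomic update, e.g. for reduction (+:s)

            #pragma omp atomic
            s = s + s_priv;

        otherwise all merges are wrapped between GOMP_atomic_start ()
        and GOMP_atomic_end () calls.  */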
2614
2615 static void
2616 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2617 {
2618 gimple_seq sub_seq = NULL;
2619 gimple stmt;
2620 tree x, c;
2621 int count = 0;
2622
2623 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2624 update in that case, otherwise use a lock. */
2625 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2626 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2627 {
2628 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2629 {
2630 /* Never use OMP_ATOMIC for array reductions. */
2631 count = -1;
2632 break;
2633 }
2634 count++;
2635 }
2636
2637 if (count == 0)
2638 return;
2639
2640 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2641 {
2642 tree var, ref, new_var;
2643 enum tree_code code;
2644 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2645
2646 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2647 continue;
2648
2649 var = OMP_CLAUSE_DECL (c);
2650 new_var = lookup_decl (var, ctx);
2651 if (is_reference (var))
2652 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2653 ref = build_outer_var_ref (var, ctx);
2654 code = OMP_CLAUSE_REDUCTION_CODE (c);
2655
2656 /* reduction(-:var) sums up the partial results, so it acts
2657 identically to reduction(+:var). */
2658 if (code == MINUS_EXPR)
2659 code = PLUS_EXPR;
2660
2661 if (count == 1)
2662 {
2663 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2664
2665 addr = save_expr (addr);
2666 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2667 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2668 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2669 gimplify_and_add (x, stmt_seqp);
2670 return;
2671 }
2672
2673 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2674 {
2675 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2676
2677 if (is_reference (var))
2678 ref = build_fold_addr_expr_loc (clause_loc, ref);
2679 SET_DECL_VALUE_EXPR (placeholder, ref);
2680 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2681 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2682 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2683 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2684 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2685 }
2686 else
2687 {
2688 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2689 ref = build_outer_var_ref (var, ctx);
2690 gimplify_assign (ref, x, &sub_seq);
2691 }
2692 }
2693
2694 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
2695 0);
2696 gimple_seq_add_stmt (stmt_seqp, stmt);
2697
2698 gimple_seq_add_seq (stmt_seqp, sub_seq);
2699
2700 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
2701 0);
2702 gimple_seq_add_stmt (stmt_seqp, stmt);
2703 }
2704
2705
2706 /* Generate code to implement the COPYPRIVATE clauses. */
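     /* Conceptually: the thread that executed the single region stores
        its values (or their addresses, when passed by reference) into
        the .omp_copy_s record via SLIST, and the other threads copy
        them back out of that record via RLIST.  */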
2707
2708 static void
2709 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2710 omp_context *ctx)
2711 {
2712 tree c;
2713
2714 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2715 {
2716 tree var, new_var, ref, x;
2717 bool by_ref;
2718 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2719
2720 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2721 continue;
2722
2723 var = OMP_CLAUSE_DECL (c);
2724 by_ref = use_pointer_for_field (var, NULL);
2725
2726 ref = build_sender_ref (var, ctx);
2727 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2728 if (by_ref)
2729 {
2730 x = build_fold_addr_expr_loc (clause_loc, new_var);
2731 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2732 }
2733 gimplify_assign (ref, x, slist);
2734
2735 ref = build_receiver_ref (var, false, ctx);
2736 if (by_ref)
2737 {
2738 ref = fold_convert_loc (clause_loc,
2739 build_pointer_type (TREE_TYPE (new_var)),
2740 ref);
2741 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2742 }
2743 if (is_reference (var))
2744 {
2745 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2746 ref = build_simple_mem_ref_loc (clause_loc, ref);
2747 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2748 }
2749 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2750 gimplify_and_add (x, rlist);
2751 }
2752 }
2753
2754
2755 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2756 and REDUCTION from the sender (aka parent) side. */
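     /* Conceptually, for '#pragma omp parallel firstprivate (x)' the
        parent stores the value (or its address, when passed by
        reference) into the marshalling record up front, roughly

            .omp_data_o.x = x;

        in ILIST, while OLIST reads lastprivate/reduction results back
        out after the region completes.  */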
2757
2758 static void
2759 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2760 omp_context *ctx)
2761 {
2762 tree c;
2763
2764 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2765 {
2766 tree val, ref, x, var;
2767 bool by_ref, do_in = false, do_out = false;
2768 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2769
2770 switch (OMP_CLAUSE_CODE (c))
2771 {
2772 case OMP_CLAUSE_PRIVATE:
2773 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2774 break;
2775 continue;
2776 case OMP_CLAUSE_FIRSTPRIVATE:
2777 case OMP_CLAUSE_COPYIN:
2778 case OMP_CLAUSE_LASTPRIVATE:
2779 case OMP_CLAUSE_REDUCTION:
2780 break;
2781 default:
2782 continue;
2783 }
2784
2785 val = OMP_CLAUSE_DECL (c);
2786 var = lookup_decl_in_outer_ctx (val, ctx);
2787
2788 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2789 && is_global_var (var))
2790 continue;
2791 if (is_variable_sized (val))
2792 continue;
2793 by_ref = use_pointer_for_field (val, NULL);
2794
2795 switch (OMP_CLAUSE_CODE (c))
2796 {
2797 case OMP_CLAUSE_PRIVATE:
2798 case OMP_CLAUSE_FIRSTPRIVATE:
2799 case OMP_CLAUSE_COPYIN:
2800 do_in = true;
2801 break;
2802
2803 case OMP_CLAUSE_LASTPRIVATE:
2804 if (by_ref || is_reference (val))
2805 {
2806 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2807 continue;
2808 do_in = true;
2809 }
2810 else
2811 {
2812 do_out = true;
2813 if (lang_hooks.decls.omp_private_outer_ref (val))
2814 do_in = true;
2815 }
2816 break;
2817
2818 case OMP_CLAUSE_REDUCTION:
2819 do_in = true;
2820 do_out = !(by_ref || is_reference (val));
2821 break;
2822
2823 default:
2824 gcc_unreachable ();
2825 }
2826
2827 if (do_in)
2828 {
2829 ref = build_sender_ref (val, ctx);
2830 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2831 gimplify_assign (ref, x, ilist);
2832 if (is_task_ctx (ctx))
2833 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2834 }
2835
2836 if (do_out)
2837 {
2838 ref = build_sender_ref (val, ctx);
2839 gimplify_assign (var, ref, olist);
2840 }
2841 }
2842 }
2843
2844 /* Generate code to implement SHARED from the sender (aka parent)
2845 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2846 list things that got automatically shared. */
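     /* Conceptually, for each such shared VAR the parent emits
        '.omp_data_o.var = var' (or stores &var when the field is a
        pointer) into ILIST, and for the non-pointer case reads the
        value back with 'var = .omp_data_o.var' in OLIST.  */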
2847
2848 static void
2849 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2850 {
2851 tree var, ovar, nvar, f, x, record_type;
2852
2853 if (ctx->record_type == NULL)
2854 return;
2855
2856 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2857 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2858 {
2859 ovar = DECL_ABSTRACT_ORIGIN (f);
2860 nvar = maybe_lookup_decl (ovar, ctx);
2861 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2862 continue;
2863
2864 /* If CTX is a nested parallel directive, find the immediately
2865 enclosing parallel or workshare construct that contains a
2866 mapping for OVAR. */
2867 var = lookup_decl_in_outer_ctx (ovar, ctx);
2868
2869 if (use_pointer_for_field (ovar, ctx))
2870 {
2871 x = build_sender_ref (ovar, ctx);
2872 var = build_fold_addr_expr (var);
2873 gimplify_assign (x, var, ilist);
2874 }
2875 else
2876 {
2877 x = build_sender_ref (ovar, ctx);
2878 gimplify_assign (x, var, ilist);
2879
2880 if (!TREE_READONLY (var)
2881 /* We don't need to receive a new reference to a result
2882 or parm decl. In fact we may not store to it as we will
2883 invalidate any pending return slot optimization (RSO) and
2884 generate wrong gimple during inlining. */
2885 && !((TREE_CODE (var) == RESULT_DECL
2886 || TREE_CODE (var) == PARM_DECL)
2887 && DECL_BY_REFERENCE (var)))
2888 {
2889 x = build_sender_ref (ovar, ctx);
2890 gimplify_assign (var, x, olist);
2891 }
2892 }
2893 }
2894 }
2895
2896
2897 /* A convenience function to build an empty GIMPLE_COND with just the
2898 condition. */
2899
2900 static gimple
2901 gimple_build_cond_empty (tree cond)
2902 {
2903 enum tree_code pred_code;
2904 tree lhs, rhs;
2905
2906 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2907 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2908 }
2909
2910
2911 /* Build the function calls to GOMP_parallel_start etc to actually
2912 generate the parallel operation. REGION is the parallel region
2913 being expanded. BB is the block where the code is to be inserted. WS_ARGS
2914 will be set if this is a call to a combined parallel+workshare
2915 construct, it contains the list of additional arguments needed by
2916 the workshare construct. */
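     /* Conceptually the emitted sequence is

            GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
            child_fn (&.omp_data_o);
            GOMP_parallel_end ();

        where GOMP_parallel_start is replaced by one of the combined
        GOMP_parallel_loop_*_start or GOMP_parallel_sections_start
        entry points for a combined parallel+workshare region.  */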
2917
2918 static void
2919 expand_parallel_call (struct omp_region *region, basic_block bb,
2920 gimple entry_stmt, VEC(tree,gc) *ws_args)
2921 {
2922 tree t, t1, t2, val, cond, c, clauses;
2923 gimple_stmt_iterator gsi;
2924 gimple stmt;
2925 enum built_in_function start_ix;
2926 int start_ix2;
2927 location_t clause_loc;
2928 VEC(tree,gc) *args;
2929
2930 clauses = gimple_omp_parallel_clauses (entry_stmt);
2931
2932 /* Determine what flavor of GOMP_parallel_start we will be
2933 emitting. */
2934 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2935 if (is_combined_parallel (region))
2936 {
2937 switch (region->inner->type)
2938 {
2939 case GIMPLE_OMP_FOR:
2940 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2941 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2942 + (region->inner->sched_kind
2943 == OMP_CLAUSE_SCHEDULE_RUNTIME
2944 ? 3 : region->inner->sched_kind));
2945 start_ix = (enum built_in_function)start_ix2;
2946 break;
2947 case GIMPLE_OMP_SECTIONS:
2948 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2949 break;
2950 default:
2951 gcc_unreachable ();
2952 }
2953 }
2954
2955 /* By default, the value of NUM_THREADS is zero (selected at run time)
2956 and there is no conditional. */
2957 cond = NULL_TREE;
2958 val = build_int_cst (unsigned_type_node, 0);
2959
2960 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2961 if (c)
2962 cond = OMP_CLAUSE_IF_EXPR (c);
2963
2964 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2965 if (c)
2966 {
2967 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2968 clause_loc = OMP_CLAUSE_LOCATION (c);
2969 }
2970 else
2971 clause_loc = gimple_location (entry_stmt);
2972
2973 /* Ensure 'val' is of the correct type. */
2974 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2975
2976 /* If we found the clause 'if (cond)', build either
2977 (cond != 0) or (cond ? val : 1u). */
2978 if (cond)
2979 {
2980 gimple_stmt_iterator gsi;
2981
2982 cond = gimple_boolify (cond);
2983
2984 if (integer_zerop (val))
2985 val = fold_build2_loc (clause_loc,
2986 EQ_EXPR, unsigned_type_node, cond,
2987 build_int_cst (TREE_TYPE (cond), 0));
2988 else
2989 {
2990 basic_block cond_bb, then_bb, else_bb;
2991 edge e, e_then, e_else;
2992 tree tmp_then, tmp_else, tmp_join, tmp_var;
2993
2994 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2995 if (gimple_in_ssa_p (cfun))
2996 {
2997 tmp_then = make_ssa_name (tmp_var, NULL);
2998 tmp_else = make_ssa_name (tmp_var, NULL);
2999 tmp_join = make_ssa_name (tmp_var, NULL);
3000 }
3001 else
3002 {
3003 tmp_then = tmp_var;
3004 tmp_else = tmp_var;
3005 tmp_join = tmp_var;
3006 }
3007
3008 e = split_block (bb, NULL);
3009 cond_bb = e->src;
3010 bb = e->dest;
3011 remove_edge (e);
3012
3013 then_bb = create_empty_bb (cond_bb);
3014 else_bb = create_empty_bb (then_bb);
3015 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3016 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3017
3018 stmt = gimple_build_cond_empty (cond);
3019 gsi = gsi_start_bb (cond_bb);
3020 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3021
3022 gsi = gsi_start_bb (then_bb);
3023 stmt = gimple_build_assign (tmp_then, val);
3024 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3025
3026 gsi = gsi_start_bb (else_bb);
3027 stmt = gimple_build_assign
3028 (tmp_else, build_int_cst (unsigned_type_node, 1));
3029 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3030
3031 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3032 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3033 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3034 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3035
3036 if (gimple_in_ssa_p (cfun))
3037 {
3038 gimple phi = create_phi_node (tmp_join, bb);
3039 SSA_NAME_DEF_STMT (tmp_join) = phi;
3040 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3041 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3042 }
3043
3044 val = tmp_join;
3045 }
3046
3047 gsi = gsi_start_bb (bb);
3048 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3049 false, GSI_CONTINUE_LINKING);
3050 }
3051
3052 gsi = gsi_last_bb (bb);
3053 t = gimple_omp_parallel_data_arg (entry_stmt);
3054 if (t == NULL)
3055 t1 = null_pointer_node;
3056 else
3057 t1 = build_fold_addr_expr (t);
3058 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3059
3060 args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
3061 VEC_quick_push (tree, args, t2);
3062 VEC_quick_push (tree, args, t1);
3063 VEC_quick_push (tree, args, val);
3064 VEC_splice (tree, args, ws_args);
3065
3066 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3067 builtin_decl_explicit (start_ix), args);
3068
3069 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3070 false, GSI_CONTINUE_LINKING);
3071
3072 t = gimple_omp_parallel_data_arg (entry_stmt);
3073 if (t == NULL)
3074 t = null_pointer_node;
3075 else
3076 t = build_fold_addr_expr (t);
3077 t = build_call_expr_loc (gimple_location (entry_stmt),
3078 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3079 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3080 false, GSI_CONTINUE_LINKING);
3081
3082 t = build_call_expr_loc (gimple_location (entry_stmt),
3083 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3084 0);
3085 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3086 false, GSI_CONTINUE_LINKING);
3087 }
3088
3089
3090 /* Build the function call to GOMP_task to actually
3091 generate the task operation. BB is the block where the code is to be inserted. */
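     /* Conceptually the emitted call is

            GOMP_task (child_fn, &.omp_data_o, copy_fn, arg_size,
                       arg_align, if_cond, flags);

        where FLAGS encodes bit 0 for 'untied', bit 1 for a true
        'final' expression and bit 2 for 'mergeable', as computed
        below.  */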
3092
3093 static void
3094 expand_task_call (basic_block bb, gimple entry_stmt)
3095 {
3096 tree t, t1, t2, t3, flags, cond, c, c2, clauses;
3097 gimple_stmt_iterator gsi;
3098 location_t loc = gimple_location (entry_stmt);
3099
3100 clauses = gimple_omp_task_clauses (entry_stmt);
3101
3102 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3103 if (c)
3104 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3105 else
3106 cond = boolean_true_node;
3107
3108 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3109 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3110 flags = build_int_cst (unsigned_type_node,
3111 (c ? 1 : 0) + (c2 ? 4 : 0));
3112
3113 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3114 if (c)
3115 {
3116 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3117 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3118 build_int_cst (unsigned_type_node, 2),
3119 build_int_cst (unsigned_type_node, 0));
3120 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3121 }
3122
3123 gsi = gsi_last_bb (bb);
3124 t = gimple_omp_task_data_arg (entry_stmt);
3125 if (t == NULL)
3126 t2 = null_pointer_node;
3127 else
3128 t2 = build_fold_addr_expr_loc (loc, t);
3129 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3130 t = gimple_omp_task_copy_fn (entry_stmt);
3131 if (t == NULL)
3132 t3 = null_pointer_node;
3133 else
3134 t3 = build_fold_addr_expr_loc (loc, t);
3135
3136 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3137 7, t1, t2, t3,
3138 gimple_omp_task_arg_size (entry_stmt),
3139 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3140
3141 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3142 false, GSI_CONTINUE_LINKING);
3143 }
3144
3145
3146 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3147 catch handler and return it. This prevents programs from violating the
3148 structured block semantics with throws. */
3149
3150 static gimple_seq
3151 maybe_catch_exception (gimple_seq body)
3152 {
3153 gimple g;
3154 tree decl;
3155
3156 if (!flag_exceptions)
3157 return body;
3158
3159 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3160 decl = lang_hooks.eh_protect_cleanup_actions ();
3161 else
3162 decl = builtin_decl_explicit (BUILT_IN_TRAP);
3163
3164 g = gimple_build_eh_must_not_throw (decl);
3165 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3166 GIMPLE_TRY_CATCH);
3167
3168 return gimple_seq_alloc_with_stmt (g);
3169 }
3170
3171 /* Chain all the DECLs in V by their DECL_CHAIN fields. */
3172
3173 static tree
3174 vec2chain (VEC(tree,gc) *v)
3175 {
3176 tree chain = NULL_TREE, t;
3177 unsigned ix;
3178
3179 FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
3180 {
3181 DECL_CHAIN (t) = chain;
3182 chain = t;
3183 }
3184
3185 return chain;
3186 }
3187
3188
3189 /* Remove barriers in REGION->EXIT's block. Note that this is only
3190 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3191 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3192 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
3193 removed. */
3194
3195 static void
3196 remove_exit_barrier (struct omp_region *region)
3197 {
3198 gimple_stmt_iterator gsi;
3199 basic_block exit_bb;
3200 edge_iterator ei;
3201 edge e;
3202 gimple stmt;
3203 int any_addressable_vars = -1;
3204
3205 exit_bb = region->exit;
3206
3207 /* If the parallel region doesn't return, we don't have REGION->EXIT
3208 block at all. */
3209 if (! exit_bb)
3210 return;
3211
3212 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3213 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3214 statements that can appear in between are extremely limited -- no
3215 memory operations at all. Here, we allow nothing at all, so the
3216 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3217 gsi = gsi_last_bb (exit_bb);
3218 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3219 gsi_prev (&gsi);
3220 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3221 return;
3222
3223 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3224 {
3225 gsi = gsi_last_bb (e->src);
3226 if (gsi_end_p (gsi))
3227 continue;
3228 stmt = gsi_stmt (gsi);
3229 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3230 && !gimple_omp_return_nowait_p (stmt))
3231 {
3232 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3233 in many cases. If there could be tasks queued, the barrier
3234 might be needed to let the tasks run before some local
3235 variable of the parallel that the task uses as shared
3236 runs out of scope. The task can be spawned either
3237 from within the current function (this would be easy to check)
3238 or from some function it calls and gets passed an address
3239 of such a variable. */
3240 if (any_addressable_vars < 0)
3241 {
3242 gimple parallel_stmt = last_stmt (region->entry);
3243 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3244 tree local_decls, block, decl;
3245 unsigned ix;
3246
3247 any_addressable_vars = 0;
3248 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3249 if (TREE_ADDRESSABLE (decl))
3250 {
3251 any_addressable_vars = 1;
3252 break;
3253 }
3254 for (block = gimple_block (stmt);
3255 !any_addressable_vars
3256 && block
3257 && TREE_CODE (block) == BLOCK;
3258 block = BLOCK_SUPERCONTEXT (block))
3259 {
3260 for (local_decls = BLOCK_VARS (block);
3261 local_decls;
3262 local_decls = DECL_CHAIN (local_decls))
3263 if (TREE_ADDRESSABLE (local_decls))
3264 {
3265 any_addressable_vars = 1;
3266 break;
3267 }
3268 if (block == gimple_block (parallel_stmt))
3269 break;
3270 }
3271 }
3272 if (!any_addressable_vars)
3273 gimple_omp_return_set_nowait (stmt);
3274 }
3275 }
3276 }
3277
3278 static void
3279 remove_exit_barriers (struct omp_region *region)
3280 {
3281 if (region->type == GIMPLE_OMP_PARALLEL)
3282 remove_exit_barrier (region);
3283
3284 if (region->inner)
3285 {
3286 region = region->inner;
3287 remove_exit_barriers (region);
3288 while (region->next)
3289 {
3290 region = region->next;
3291 remove_exit_barriers (region);
3292 }
3293 }
3294 }
3295
3296 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3297 calls. These can't be declared as const functions, but
3298 within one parallel body they are constant, so they can be
3299 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3300 which are declared const. Similarly for the task body, except
3301 that in an untied task omp_get_thread_num () can change at any task
3302 scheduling point. */
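     /* For example, inside an outlined parallel body a call to the
        external omp_get_num_threads () is redirected to
        __builtin_omp_get_num_threads, which is declared const, so
        later passes can CSE repeated calls.  */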
3303
3304 static void
3305 optimize_omp_library_calls (gimple entry_stmt)
3306 {
3307 basic_block bb;
3308 gimple_stmt_iterator gsi;
3309 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3310 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3311 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3312 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
3313 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3314 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3315 OMP_CLAUSE_UNTIED) != NULL);
3316
3317 FOR_EACH_BB (bb)
3318 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3319 {
3320 gimple call = gsi_stmt (gsi);
3321 tree decl;
3322
3323 if (is_gimple_call (call)
3324 && (decl = gimple_call_fndecl (call))
3325 && DECL_EXTERNAL (decl)
3326 && TREE_PUBLIC (decl)
3327 && DECL_INITIAL (decl) == NULL)
3328 {
3329 tree built_in;
3330
3331 if (DECL_NAME (decl) == thr_num_id)
3332 {
3333 /* In #pragma omp task untied omp_get_thread_num () can change
3334 during the execution of the task region. */
3335 if (untied_task)
3336 continue;
3337 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3338 }
3339 else if (DECL_NAME (decl) == num_thr_id)
3340 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3341 else
3342 continue;
3343
3344 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3345 || gimple_call_num_args (call) != 0)
3346 continue;
3347
3348 if (flag_exceptions && !TREE_NOTHROW (decl))
3349 continue;
3350
3351 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3352 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3353 TREE_TYPE (TREE_TYPE (built_in))))
3354 continue;
3355
3356 gimple_call_set_fndecl (call, built_in);
3357 }
3358 }
3359 }
3360
3361 /* Expand the OpenMP parallel or task directive starting at REGION. */
3362
3363 static void
3364 expand_omp_taskreg (struct omp_region *region)
3365 {
3366 basic_block entry_bb, exit_bb, new_bb;
3367 struct function *child_cfun;
3368 tree child_fn, block, t;
3369 tree save_current;
3370 gimple_stmt_iterator gsi;
3371 gimple entry_stmt, stmt;
3372 edge e;
3373 VEC(tree,gc) *ws_args;
3374
3375 entry_stmt = last_stmt (region->entry);
3376 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3377 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3378 /* If this function has already been instrumented, make sure
3379 the child function isn't instrumented again. */
3380 child_cfun->after_tree_profile = cfun->after_tree_profile;
3381
3382 entry_bb = region->entry;
3383 exit_bb = region->exit;
3384
3385 if (is_combined_parallel (region))
3386 ws_args = region->ws_args;
3387 else
3388 ws_args = NULL;
3389
3390 if (child_cfun->cfg)
3391 {
3392 /* Due to inlining, it may happen that we have already outlined
3393 the region, in which case all we need to do is make the
3394 sub-graph unreachable and emit the parallel call. */
3395 edge entry_succ_e, exit_succ_e;
3396 gimple_stmt_iterator gsi;
3397
3398 entry_succ_e = single_succ_edge (entry_bb);
3399
3400 gsi = gsi_last_bb (entry_bb);
3401 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3402 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3403 gsi_remove (&gsi, true);
3404
3405 new_bb = entry_bb;
3406 if (exit_bb)
3407 {
3408 exit_succ_e = single_succ_edge (exit_bb);
3409 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3410 }
3411 remove_edge_and_dominated_blocks (entry_succ_e);
3412 }
3413 else
3414 {
3415 unsigned srcidx, dstidx, num;
3416
3417 /* If the parallel region needs data sent from the parent
3418 function, then the very first statement (except possible
3419 tree profile counter updates) of the parallel body
3420 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3421 &.OMP_DATA_O is passed as an argument to the child function,
3422 we need to replace it with the argument as seen by the child
3423 function.
3424
3425 In most cases, this will end up being the identity assignment
3426 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3427 a function call that has been inlined, the original PARM_DECL
3428 .OMP_DATA_I may have been converted into a different local
3429 variable, in which case we need to keep the assignment. */
3430 if (gimple_omp_taskreg_data_arg (entry_stmt))
3431 {
3432 basic_block entry_succ_bb = single_succ (entry_bb);
3433 gimple_stmt_iterator gsi;
3434 tree arg, narg;
3435 gimple parcopy_stmt = NULL;
3436
3437 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3438 {
3439 gimple stmt;
3440
3441 gcc_assert (!gsi_end_p (gsi));
3442 stmt = gsi_stmt (gsi);
3443 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3444 continue;
3445
3446 if (gimple_num_ops (stmt) == 2)
3447 {
3448 tree arg = gimple_assign_rhs1 (stmt);
3449
3450 /* We're ignoring the subcode because we're
3451 effectively doing a STRIP_NOPS. */
3452
3453 if (TREE_CODE (arg) == ADDR_EXPR
3454 && TREE_OPERAND (arg, 0)
3455 == gimple_omp_taskreg_data_arg (entry_stmt))
3456 {
3457 parcopy_stmt = stmt;
3458 break;
3459 }
3460 }
3461 }
3462
3463 gcc_assert (parcopy_stmt != NULL);
3464 arg = DECL_ARGUMENTS (child_fn);
3465
3466 if (!gimple_in_ssa_p (cfun))
3467 {
3468 if (gimple_assign_lhs (parcopy_stmt) == arg)
3469 gsi_remove (&gsi, true);
3470 else
3471 {
3472 /* ?? Is setting the subcode really necessary ?? */
3473 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3474 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3475 }
3476 }
3477 else
3478 {
3479 /* If we are in ssa form, we must load the value from the default
3480 definition of the argument. That should not be defined now,
3481 since the argument is not used uninitialized. */
3482 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3483 narg = make_ssa_name (arg, gimple_build_nop ());
3484 set_default_def (arg, narg);
3485 /* ?? Is setting the subcode really necessary ?? */
3486 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3487 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3488 update_stmt (parcopy_stmt);
3489 }
3490 }
3491
3492 /* Declare local variables needed in CHILD_CFUN. */
3493 block = DECL_INITIAL (child_fn);
3494 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3495 /* The gimplifier could record temporaries in the parallel/task block
3496 rather than in the containing function's local_decls chain,
3497 which would mean cgraph would miss finalizing them. Do it now. */
3498 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3499 if (TREE_CODE (t) == VAR_DECL
3500 && TREE_STATIC (t)
3501 && !DECL_EXTERNAL (t))
3502 varpool_finalize_decl (t);
3503 DECL_SAVED_TREE (child_fn) = NULL;
3504 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3505 TREE_USED (block) = 1;
3506
3507 /* Reset DECL_CONTEXT on function arguments. */
3508 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3509 DECL_CONTEXT (t) = child_fn;
3510
3511 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3512 so that it can be moved to the child function. */
3513 gsi = gsi_last_bb (entry_bb);
3514 stmt = gsi_stmt (gsi);
3515 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3516 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3517 gsi_remove (&gsi, true);
3518 e = split_block (entry_bb, stmt);
3519 entry_bb = e->dest;
3520 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3521
3522 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3523 if (exit_bb)
3524 {
3525 gsi = gsi_last_bb (exit_bb);
3526 gcc_assert (!gsi_end_p (gsi)
3527 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3528 stmt = gimple_build_return (NULL);
3529 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3530 gsi_remove (&gsi, true);
3531 }
3532
3533 /* Move the parallel region into CHILD_CFUN. */
3534
3535 if (gimple_in_ssa_p (cfun))
3536 {
3537 push_cfun (child_cfun);
3538 init_tree_ssa (child_cfun);
3539 init_ssa_operands ();
3540 cfun->gimple_df->in_ssa_p = true;
3541 pop_cfun ();
3542 block = NULL_TREE;
3543 }
3544 else
3545 block = gimple_block (entry_stmt);
3546
3547 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3548 if (exit_bb)
3549 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3550
3551 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3552 num = VEC_length (tree, child_cfun->local_decls);
3553 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3554 {
3555 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3556 if (DECL_CONTEXT (t) == cfun->decl)
3557 continue;
3558 if (srcidx != dstidx)
3559 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3560 dstidx++;
3561 }
3562 if (dstidx != num)
3563 VEC_truncate (tree, child_cfun->local_decls, dstidx);
3564
3565 /* Inform the callgraph about the new function. */
3566 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3567 = cfun->curr_properties & ~PROP_loops;
3568 cgraph_add_new_function (child_fn, true);
3569
3570 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3571 fixed in a following pass. */
3572 push_cfun (child_cfun);
3573 save_current = current_function_decl;
3574 current_function_decl = child_fn;
3575 if (optimize)
3576 optimize_omp_library_calls (entry_stmt);
3577 rebuild_cgraph_edges ();
3578
3579 /* Some EH regions might become dead, see PR34608. If
3580 pass_cleanup_cfg isn't the first pass to happen with the
3581 new child, these dead EH edges might cause problems.
3582 Clean them up now. */
3583 if (flag_exceptions)
3584 {
3585 basic_block bb;
3586 bool changed = false;
3587
3588 FOR_EACH_BB (bb)
3589 changed |= gimple_purge_dead_eh_edges (bb);
3590 if (changed)
3591 cleanup_tree_cfg ();
3592 }
3593 if (gimple_in_ssa_p (cfun))
3594 update_ssa (TODO_update_ssa);
3595 current_function_decl = save_current;
3596 pop_cfun ();
3597 }
3598
3599 /* Emit a library call to launch the children threads. */
3600 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3601 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3602 else
3603 expand_task_call (new_bb, entry_stmt);
3604 update_ssa (TODO_update_ssa_only_virtuals);
3605 }
3606
3607
3608 /* A subroutine of expand_omp_for. Generate code for a parallel
3609 loop with any schedule. Given parameters:
3610
3611 for (V = N1; V cond N2; V += STEP) BODY;
3612
3613 where COND is "<" or ">", we generate pseudocode
3614
3615 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3616 if (more) goto L0; else goto L3;
3617 L0:
3618 V = istart0;
3619 iend = iend0;
3620 L1:
3621 BODY;
3622 V += STEP;
3623 if (V cond iend) goto L1; else goto L2;
3624 L2:
3625 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3626 L3:
3627
3628 If this is a combined omp parallel loop, instead of the call to
3629 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3630
3631 For collapsed loops, given parameters:
3632 collapse(3)
3633 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3634 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3635 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3636 BODY;
3637
3638 we generate pseudocode
3639
3640 if (cond3 is <)
3641 adj = STEP3 - 1;
3642 else
3643 adj = STEP3 + 1;
3644 count3 = (adj + N32 - N31) / STEP3;
3645 if (cond2 is <)
3646 adj = STEP2 - 1;
3647 else
3648 adj = STEP2 + 1;
3649 count2 = (adj + N22 - N21) / STEP2;
3650 if (cond1 is <)
3651 adj = STEP1 - 1;
3652 else
3653 adj = STEP1 + 1;
3654 count1 = (adj + N12 - N11) / STEP1;
3655 count = count1 * count2 * count3;
3656 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3657 if (more) goto L0; else goto L3;
3658 L0:
3659 V = istart0;
3660 T = V;
3661 V3 = N31 + (T % count3) * STEP3;
3662 T = T / count3;
3663 V2 = N21 + (T % count2) * STEP2;
3664 T = T / count2;
3665 V1 = N11 + T * STEP1;
3666 iend = iend0;
3667 L1:
3668 BODY;
3669 V += 1;
3670 if (V < iend) goto L10; else goto L2;
3671 L10:
3672 V3 += STEP3;
3673 if (V3 cond3 N32) goto L1; else goto L11;
3674 L11:
3675 V3 = N31;
3676 V2 += STEP2;
3677 if (V2 cond2 N22) goto L1; else goto L12;
3678 L12:
3679 V2 = N21;
3680 V1 += STEP1;
3681 goto L1;
3682 L2:
3683 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3684 L3:
3685
3686 */
3687
3688 static void
3689 expand_omp_for_generic (struct omp_region *region,
3690 struct omp_for_data *fd,
3691 enum built_in_function start_fn,
3692 enum built_in_function next_fn)
3693 {
3694 tree type, istart0, iend0, iend;
3695 tree t, vmain, vback, bias = NULL_TREE;
3696 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3697 basic_block l2_bb = NULL, l3_bb = NULL;
3698 gimple_stmt_iterator gsi;
3699 gimple stmt;
3700 bool in_combined_parallel = is_combined_parallel (region);
3701 bool broken_loop = region->cont == NULL;
3702 edge e, ne;
3703 tree *counts = NULL;
3704 int i;
3705
3706 gcc_assert (!broken_loop || !in_combined_parallel);
3707 gcc_assert (fd->iter_type == long_integer_type_node
3708 || !in_combined_parallel);
3709
3710 type = TREE_TYPE (fd->loop.v);
3711 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3712 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3713 TREE_ADDRESSABLE (istart0) = 1;
3714 TREE_ADDRESSABLE (iend0) = 1;
3715 if (gimple_in_ssa_p (cfun))
3716 {
3717 add_referenced_var (istart0);
3718 add_referenced_var (iend0);
3719 }
3720
3721 /* See if we need to bias by LLONG_MIN. */
3722 if (fd->iter_type == long_long_unsigned_type_node
3723 && TREE_CODE (type) == INTEGER_TYPE
3724 && !TYPE_UNSIGNED (type))
3725 {
3726 tree n1, n2;
3727
3728 if (fd->loop.cond_code == LT_EXPR)
3729 {
3730 n1 = fd->loop.n1;
3731 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3732 }
3733 else
3734 {
3735 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3736 n2 = fd->loop.n1;
3737 }
3738 if (TREE_CODE (n1) != INTEGER_CST
3739 || TREE_CODE (n2) != INTEGER_CST
3740 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3741 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3742 }
3743
3744 entry_bb = region->entry;
3745 cont_bb = region->cont;
3746 collapse_bb = NULL;
3747 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3748 gcc_assert (broken_loop
3749 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3750 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3751 l1_bb = single_succ (l0_bb);
3752 if (!broken_loop)
3753 {
3754 l2_bb = create_empty_bb (cont_bb);
3755 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3756 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3757 }
3758 else
3759 l2_bb = NULL;
3760 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3761 exit_bb = region->exit;
3762
3763 gsi = gsi_last_bb (entry_bb);
3764
3765 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3766 if (fd->collapse > 1)
3767 {
3768 /* Collapsed loops need work for expansion in SSA form. */
3769 gcc_assert (!gimple_in_ssa_p (cfun));
3770 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3771 for (i = 0; i < fd->collapse; i++)
3772 {
3773 tree itype = TREE_TYPE (fd->loops[i].v);
3774
3775 if (POINTER_TYPE_P (itype))
3776 itype = signed_type_for (itype);
3777 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3778 ? -1 : 1));
3779 t = fold_build2 (PLUS_EXPR, itype,
3780 fold_convert (itype, fd->loops[i].step), t);
3781 t = fold_build2 (PLUS_EXPR, itype, t,
3782 fold_convert (itype, fd->loops[i].n2));
3783 t = fold_build2 (MINUS_EXPR, itype, t,
3784 fold_convert (itype, fd->loops[i].n1));
3785 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3786 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3787 fold_build1 (NEGATE_EXPR, itype, t),
3788 fold_build1 (NEGATE_EXPR, itype,
3789 fold_convert (itype,
3790 fd->loops[i].step)));
3791 else
3792 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3793 fold_convert (itype, fd->loops[i].step));
3794 t = fold_convert (type, t);
3795 if (TREE_CODE (t) == INTEGER_CST)
3796 counts[i] = t;
3797 else
3798 {
3799 counts[i] = create_tmp_var (type, ".count");
3800 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3801 true, GSI_SAME_STMT);
3802 stmt = gimple_build_assign (counts[i], t);
3803 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3804 }
3805 if (SSA_VAR_P (fd->loop.n2))
3806 {
3807 if (i == 0)
3808 t = counts[0];
3809 else
3810 {
3811 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3812 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3813 true, GSI_SAME_STMT);
3814 }
3815 stmt = gimple_build_assign (fd->loop.n2, t);
3816 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3817 }
3818 }
3819 }
3820 if (in_combined_parallel)
3821 {
3822 /* In a combined parallel loop, emit a call to
3823 GOMP_loop_foo_next. */
3824 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
3825 build_fold_addr_expr (istart0),
3826 build_fold_addr_expr (iend0));
3827 }
3828 else
3829 {
3830 tree t0, t1, t2, t3, t4;
3831 /* If this is not a combined parallel loop, emit a call to
3832 GOMP_loop_foo_start in ENTRY_BB. */
3833 t4 = build_fold_addr_expr (iend0);
3834 t3 = build_fold_addr_expr (istart0);
3835 t2 = fold_convert (fd->iter_type, fd->loop.step);
3836 if (POINTER_TYPE_P (type)
3837 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3838 {
3839 /* Avoid casting pointers to an integer type of a different size. */
3840 tree itype = signed_type_for (type);
3841 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3842 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3843 }
3844 else
3845 {
3846 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3847 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3848 }
3849 if (bias)
3850 {
3851 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3852 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3853 }
3854 if (fd->iter_type == long_integer_type_node)
3855 {
3856 if (fd->chunk_size)
3857 {
3858 t = fold_convert (fd->iter_type, fd->chunk_size);
3859 t = build_call_expr (builtin_decl_explicit (start_fn),
3860 6, t0, t1, t2, t, t3, t4);
3861 }
3862 else
3863 t = build_call_expr (builtin_decl_explicit (start_fn),
3864 5, t0, t1, t2, t3, t4);
3865 }
3866 else
3867 {
3868 tree t5;
3869 tree c_bool_type;
3870 tree bfn_decl;
3871
3872 /* The GOMP_loop_ull_*start functions have an additional boolean
3873 argument, true for < loops and false for > loops.
3874 In Fortran, the C bool type can be different from
3875 boolean_type_node. */
3876 bfn_decl = builtin_decl_explicit (start_fn);
3877 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
3878 t5 = build_int_cst (c_bool_type,
3879 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3880 if (fd->chunk_size)
3881 {
3882 tree bfn_decl = builtin_decl_explicit (start_fn);
3883 t = fold_convert (fd->iter_type, fd->chunk_size);
3884 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
3885 }
3886 else
3887 t = build_call_expr (builtin_decl_explicit (start_fn),
3888 6, t5, t0, t1, t2, t3, t4);
3889 }
3890 }
3891 if (TREE_TYPE (t) != boolean_type_node)
3892 t = fold_build2 (NE_EXPR, boolean_type_node,
3893 t, build_int_cst (TREE_TYPE (t), 0));
3894 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3895 true, GSI_SAME_STMT);
3896 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3897
3898 /* Remove the GIMPLE_OMP_FOR statement. */
3899 gsi_remove (&gsi, true);
3900
3901 /* Iteration setup for sequential loop goes in L0_BB. */
3902 gsi = gsi_start_bb (l0_bb);
3903 t = istart0;
3904 if (bias)
3905 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3906 if (POINTER_TYPE_P (type))
3907 t = fold_convert (signed_type_for (type), t);
3908 t = fold_convert (type, t);
3909 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3910 false, GSI_CONTINUE_LINKING);
3911 stmt = gimple_build_assign (fd->loop.v, t);
3912 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3913
3914 t = iend0;
3915 if (bias)
3916 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3917 if (POINTER_TYPE_P (type))
3918 t = fold_convert (signed_type_for (type), t);
3919 t = fold_convert (type, t);
3920 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3921 false, GSI_CONTINUE_LINKING);
3922 if (fd->collapse > 1)
3923 {
3924 tree tem = create_tmp_var (type, ".tem");
3925
3926 stmt = gimple_build_assign (tem, fd->loop.v);
3927 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3928 for (i = fd->collapse - 1; i >= 0; i--)
3929 {
3930 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3931 itype = vtype;
3932 if (POINTER_TYPE_P (vtype))
3933 itype = signed_type_for (vtype);
3934 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3935 t = fold_convert (itype, t);
3936 t = fold_build2 (MULT_EXPR, itype, t,
3937 fold_convert (itype, fd->loops[i].step));
3938 if (POINTER_TYPE_P (vtype))
3939 t = fold_build_pointer_plus (fd->loops[i].n1, t);
3940 else
3941 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3942 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3943 false, GSI_CONTINUE_LINKING);
3944 stmt = gimple_build_assign (fd->loops[i].v, t);
3945 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3946 if (i != 0)
3947 {
3948 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3949 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3950 false, GSI_CONTINUE_LINKING);
3951 stmt = gimple_build_assign (tem, t);
3952 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3953 }
3954 }
3955 }
3956
3957 if (!broken_loop)
3958 {
3959 /* Code to control the increment and predicate for the sequential
3960 loop goes in the CONT_BB. */
3961 gsi = gsi_last_bb (cont_bb);
3962 stmt = gsi_stmt (gsi);
3963 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3964 vmain = gimple_omp_continue_control_use (stmt);
3965 vback = gimple_omp_continue_control_def (stmt);
3966
3967 if (POINTER_TYPE_P (type))
3968 t = fold_build_pointer_plus (vmain, fd->loop.step);
3969 else
3970 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3971 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3972 true, GSI_SAME_STMT);
3973 stmt = gimple_build_assign (vback, t);
3974 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3975
3976 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3977 stmt = gimple_build_cond_empty (t);
3978 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3979
3980 /* Remove GIMPLE_OMP_CONTINUE. */
3981 gsi_remove (&gsi, true);
3982
3983 if (fd->collapse > 1)
3984 {
3985 basic_block last_bb, bb;
3986
3987 last_bb = cont_bb;
3988 for (i = fd->collapse - 1; i >= 0; i--)
3989 {
3990 tree vtype = TREE_TYPE (fd->loops[i].v);
3991
3992 bb = create_empty_bb (last_bb);
3993 gsi = gsi_start_bb (bb);
3994
3995 if (i < fd->collapse - 1)
3996 {
3997 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3998 e->probability = REG_BR_PROB_BASE / 8;
3999
4000 t = fd->loops[i + 1].n1;
4001 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4002 false, GSI_CONTINUE_LINKING);
4003 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4004 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4005 }
4006 else
4007 collapse_bb = bb;
4008
4009 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4010
4011 if (POINTER_TYPE_P (vtype))
4012 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
4013 else
4014 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
4015 fd->loops[i].step);
4016 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4017 false, GSI_CONTINUE_LINKING);
4018 stmt = gimple_build_assign (fd->loops[i].v, t);
4019 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4020
4021 if (i > 0)
4022 {
4023 t = fd->loops[i].n2;
4024 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4025 false, GSI_CONTINUE_LINKING);
4026 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4027 fd->loops[i].v, t);
4028 stmt = gimple_build_cond_empty (t);
4029 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4030 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4031 e->probability = REG_BR_PROB_BASE * 7 / 8;
4032 }
4033 else
4034 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4035 last_bb = bb;
4036 }
4037 }
4038
4039 /* Emit code to get the next parallel iteration in L2_BB. */
4040 gsi = gsi_start_bb (l2_bb);
4041
4042 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4043 build_fold_addr_expr (istart0),
4044 build_fold_addr_expr (iend0));
4045 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4046 false, GSI_CONTINUE_LINKING);
4047 if (TREE_TYPE (t) != boolean_type_node)
4048 t = fold_build2 (NE_EXPR, boolean_type_node,
4049 t, build_int_cst (TREE_TYPE (t), 0));
4050 stmt = gimple_build_cond_empty (t);
4051 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4052 }
4053
4054 /* Add the loop cleanup function. */
4055 gsi = gsi_last_bb (exit_bb);
4056 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4057 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
4058 else
4059 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
4060 stmt = gimple_build_call (t, 0);
4061 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4062 gsi_remove (&gsi, true);
4063
4064 /* Connect the new blocks. */
4065 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4066 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4067
4068 if (!broken_loop)
4069 {
4070 gimple_seq phis;
4071
4072 e = find_edge (cont_bb, l3_bb);
4073 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4074
4075 phis = phi_nodes (l3_bb);
4076 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4077 {
4078 gimple phi = gsi_stmt (gsi);
4079 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4080 PHI_ARG_DEF_FROM_EDGE (phi, e));
4081 }
4082 remove_edge (e);
4083
4084 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4085 if (fd->collapse > 1)
4086 {
4087 e = find_edge (cont_bb, l1_bb);
4088 remove_edge (e);
4089 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4090 }
4091 else
4092 {
4093 e = find_edge (cont_bb, l1_bb);
4094 e->flags = EDGE_TRUE_VALUE;
4095 }
4096 e->probability = REG_BR_PROB_BASE * 7 / 8;
4097 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4098 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4099
4100 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4101 recompute_dominator (CDI_DOMINATORS, l2_bb));
4102 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4103 recompute_dominator (CDI_DOMINATORS, l3_bb));
4104 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4105 recompute_dominator (CDI_DOMINATORS, l0_bb));
4106 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4107 recompute_dominator (CDI_DOMINATORS, l1_bb));
4108 }
4109 }
4110
4111
4112 /* A subroutine of expand_omp_for. Generate code for a parallel
4113 loop with static schedule and no specified chunk size. Given
4114 parameters:
4115
4116 for (V = N1; V cond N2; V += STEP) BODY;
4117
4118 where COND is "<" or ">", we generate pseudocode
4119
4120 if (cond is <)
4121 adj = STEP - 1;
4122 else
4123 adj = STEP + 1;
4124 if ((__typeof (V)) -1 > 0 && cond is >)
4125 n = -(adj + N2 - N1) / -STEP;
4126 else
4127 n = (adj + N2 - N1) / STEP;
4128 q = n / nthreads;
4129 tt = n % nthreads;
4130 if (threadid < tt) goto L3; else goto L4;
4131 L3:
4132 tt = 0;
4133 q = q + 1;
4134 L4:
4135 s0 = q * threadid + tt;
4136 e0 = s0 + q;
4137 V = s0 * STEP + N1;
4138 if (s0 >= e0) goto L2; else goto L0;
4139 L0:
4140 e = e0 * STEP + N1;
4141 L1:
4142 BODY;
4143 V += STEP;
4144 if (V cond e) goto L1;
4145 L2:
4146 */
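
/* As a worked example of the partitioning above (numbers assumed purely
   for illustration), take n = 10 iterations and nthreads = 4:

     q  = 10 / 4 = 2
     tt = 10 % 4 = 2

   Threads 0 and 1 satisfy threadid < tt, so each takes q + 1 = 3
   iterations; threads 2 and 3 take q = 2.  The resulting [s0, e0)
   ranges are [0,3), [3,6), [6,8) and [8,10), covering all ten
   iterations exactly once with no run-time scheduling calls.  */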
4147
4148 static void
4149 expand_omp_for_static_nochunk (struct omp_region *region,
4150 struct omp_for_data *fd)
4151 {
4152 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
4153 tree type, itype, vmain, vback;
4154 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4155 basic_block body_bb, cont_bb;
4156 basic_block fin_bb;
4157 gimple_stmt_iterator gsi;
4158 gimple stmt;
4159 edge ep;
4160
4161 itype = type = TREE_TYPE (fd->loop.v);
4162 if (POINTER_TYPE_P (type))
4163 itype = signed_type_for (type);
4164
4165 entry_bb = region->entry;
4166 cont_bb = region->cont;
4167 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4168 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4169 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4170 body_bb = single_succ (seq_start_bb);
4171 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4172 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4173 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4174 exit_bb = region->exit;
4175
4176 /* Iteration space partitioning goes in ENTRY_BB. */
4177 gsi = gsi_last_bb (entry_bb);
4178 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4179
4180 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4181 t = fold_convert (itype, t);
4182 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4183 true, GSI_SAME_STMT);
4184
4185 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4186 t = fold_convert (itype, t);
4187 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4188 true, GSI_SAME_STMT);
4189
4190 fd->loop.n1
4191 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4192 true, NULL_TREE, true, GSI_SAME_STMT);
4193 fd->loop.n2
4194 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4195 true, NULL_TREE, true, GSI_SAME_STMT);
4196 fd->loop.step
4197 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4198 true, NULL_TREE, true, GSI_SAME_STMT);
4199
4200 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4201 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4202 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4203 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4204 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4205 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4206 fold_build1 (NEGATE_EXPR, itype, t),
4207 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4208 else
4209 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4210 t = fold_convert (itype, t);
4211 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4212
4213 q = create_tmp_var (itype, "q");
4214 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4215 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4216 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4217
4218 tt = create_tmp_var (itype, "tt");
4219 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4220 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4221 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
4222
4223 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4224 stmt = gimple_build_cond_empty (t);
4225 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4226
4227 second_bb = split_block (entry_bb, stmt)->dest;
4228 gsi = gsi_last_bb (second_bb);
4229 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4230
4231 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4232 GSI_SAME_STMT);
4233 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4234 build_int_cst (itype, 1));
4235 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4236
4237 third_bb = split_block (second_bb, stmt)->dest;
4238 gsi = gsi_last_bb (third_bb);
4239 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4240
4241 t = build2 (MULT_EXPR, itype, q, threadid);
4242 t = build2 (PLUS_EXPR, itype, t, tt);
4243 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4244
4245 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4246 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4247
4248 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4249 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4250
4251 /* Remove the GIMPLE_OMP_FOR statement. */
4252 gsi_remove (&gsi, true);
4253
4254 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4255 gsi = gsi_start_bb (seq_start_bb);
4256
4257 t = fold_convert (itype, s0);
4258 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4259 if (POINTER_TYPE_P (type))
4260 t = fold_build_pointer_plus (fd->loop.n1, t);
4261 else
4262 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4263 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4264 false, GSI_CONTINUE_LINKING);
4265 stmt = gimple_build_assign (fd->loop.v, t);
4266 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4267
4268 t = fold_convert (itype, e0);
4269 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4270 if (POINTER_TYPE_P (type))
4271 t = fold_build_pointer_plus (fd->loop.n1, t);
4272 else
4273 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4274 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4275 false, GSI_CONTINUE_LINKING);
4276
4277 /* The code controlling the sequential loop replaces the
4278 GIMPLE_OMP_CONTINUE. */
4279 gsi = gsi_last_bb (cont_bb);
4280 stmt = gsi_stmt (gsi);
4281 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4282 vmain = gimple_omp_continue_control_use (stmt);
4283 vback = gimple_omp_continue_control_def (stmt);
4284
4285 if (POINTER_TYPE_P (type))
4286 t = fold_build_pointer_plus (vmain, fd->loop.step);
4287 else
4288 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4289 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4290 true, GSI_SAME_STMT);
4291 stmt = gimple_build_assign (vback, t);
4292 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4293
4294 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4295 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4296
4297 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4298 gsi_remove (&gsi, true);
4299
4300 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4301 gsi = gsi_last_bb (exit_bb);
4302 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4303 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4304 false, GSI_SAME_STMT);
4305 gsi_remove (&gsi, true);
4306
4307 /* Connect all the blocks. */
4308 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
4309 ep->probability = REG_BR_PROB_BASE / 4 * 3;
4310 ep = find_edge (entry_bb, second_bb);
4311 ep->flags = EDGE_TRUE_VALUE;
4312 ep->probability = REG_BR_PROB_BASE / 4;
4313 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4314 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4315
4316 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4317 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4318
4319 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
4320 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
4321 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
4322 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4323 recompute_dominator (CDI_DOMINATORS, body_bb));
4324 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4325 recompute_dominator (CDI_DOMINATORS, fin_bb));
4326 }
4327
4328
4329 /* A subroutine of expand_omp_for. Generate code for a parallel
4330 loop with static schedule and a specified chunk size. Given
4331 parameters:
4332
4333 for (V = N1; V cond N2; V += STEP) BODY;
4334
4335 where COND is "<" or ">", we generate pseudocode
4336
4337 if (cond is <)
4338 adj = STEP - 1;
4339 else
4340 adj = STEP + 1;
4341 if ((__typeof (V)) -1 > 0 && cond is >)
4342 n = -(adj + N2 - N1) / -STEP;
4343 else
4344 n = (adj + N2 - N1) / STEP;
4345 trip = 0;
4346 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4347 here so that V is defined
4348 if the loop is not entered
4349 L0:
4350 s0 = (trip * nthreads + threadid) * CHUNK;
4351 e0 = min(s0 + CHUNK, n);
4352 if (s0 < n) goto L1; else goto L4;
4353 L1:
4354 V = s0 * STEP + N1;
4355 e = e0 * STEP + N1;
4356 L2:
4357 BODY;
4358 V += STEP;
4359 if (V cond e) goto L2; else goto L3;
4360 L3:
4361 trip += 1;
4362 goto L0;
4363 L4:
4364 */
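
/* For instance (numbers assumed for illustration), with n = 10,
   nthreads = 2 and CHUNK = 2 the chunks are handed out round-robin:

     trip 0:  thread 0 gets [0,2),  thread 1 gets [2,4)
     trip 1:  thread 0 gets [4,6),  thread 1 gets [6,8)
     trip 2:  thread 0 gets [8,10), thread 1 finds s0 = 10 >= n and stops

   so each thread keeps looping back to L0 until its next chunk would
   start at or beyond n.  */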
4365
4366 static void
4367 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4368 {
4369 tree n, s0, e0, e, t;
4370 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4371 tree type, itype, v_main, v_back, v_extra;
4372 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4373 basic_block trip_update_bb, cont_bb, fin_bb;
4374 gimple_stmt_iterator si;
4375 gimple stmt;
4376 edge se;
4377
4378 itype = type = TREE_TYPE (fd->loop.v);
4379 if (POINTER_TYPE_P (type))
4380 itype = signed_type_for (type);
4381
4382 entry_bb = region->entry;
4383 se = split_block (entry_bb, last_stmt (entry_bb));
4384 entry_bb = se->src;
4385 iter_part_bb = se->dest;
4386 cont_bb = region->cont;
4387 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4388 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4389 == FALLTHRU_EDGE (cont_bb)->dest);
4390 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4391 body_bb = single_succ (seq_start_bb);
4392 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4393 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4394 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4395 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4396 exit_bb = region->exit;
4397
4398 /* Trip and adjustment setup goes in ENTRY_BB. */
4399 si = gsi_last_bb (entry_bb);
4400 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4401
4402 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4403 t = fold_convert (itype, t);
4404 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4405 true, GSI_SAME_STMT);
4406
4407 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4408 t = fold_convert (itype, t);
4409 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4410 true, GSI_SAME_STMT);
4411
4412 fd->loop.n1
4413 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4414 true, NULL_TREE, true, GSI_SAME_STMT);
4415 fd->loop.n2
4416 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4417 true, NULL_TREE, true, GSI_SAME_STMT);
4418 fd->loop.step
4419 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4420 true, NULL_TREE, true, GSI_SAME_STMT);
4421 fd->chunk_size
4422 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4423 true, NULL_TREE, true, GSI_SAME_STMT);
4424
4425 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4426 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4427 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4428 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4429 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4430 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4431 fold_build1 (NEGATE_EXPR, itype, t),
4432 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4433 else
4434 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4435 t = fold_convert (itype, t);
4436 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4437 true, GSI_SAME_STMT);
4438
4439 trip_var = create_tmp_var (itype, ".trip");
4440 if (gimple_in_ssa_p (cfun))
4441 {
4442 add_referenced_var (trip_var);
4443 trip_init = make_ssa_name (trip_var, NULL);
4444 trip_main = make_ssa_name (trip_var, NULL);
4445 trip_back = make_ssa_name (trip_var, NULL);
4446 }
4447 else
4448 {
4449 trip_init = trip_var;
4450 trip_main = trip_var;
4451 trip_back = trip_var;
4452 }
4453
4454 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4455 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4456
4457 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4458 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4459 if (POINTER_TYPE_P (type))
4460 t = fold_build_pointer_plus (fd->loop.n1, t);
4461 else
4462 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4463 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4464 true, GSI_SAME_STMT);
4465
4466 /* Remove the GIMPLE_OMP_FOR. */
4467 gsi_remove (&si, true);
4468
4469 /* Iteration space partitioning goes in ITER_PART_BB. */
4470 si = gsi_last_bb (iter_part_bb);
4471
4472 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4473 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4474 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4475 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4476 false, GSI_CONTINUE_LINKING);
4477
4478 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4479 t = fold_build2 (MIN_EXPR, itype, t, n);
4480 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4481 false, GSI_CONTINUE_LINKING);
4482
4483 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4484 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4485
4486 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4487 si = gsi_start_bb (seq_start_bb);
4488
4489 t = fold_convert (itype, s0);
4490 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4491 if (POINTER_TYPE_P (type))
4492 t = fold_build_pointer_plus (fd->loop.n1, t);
4493 else
4494 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4495 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4496 false, GSI_CONTINUE_LINKING);
4497 stmt = gimple_build_assign (fd->loop.v, t);
4498 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4499
4500 t = fold_convert (itype, e0);
4501 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4502 if (POINTER_TYPE_P (type))
4503 t = fold_build_pointer_plus (fd->loop.n1, t);
4504 else
4505 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4506 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4507 false, GSI_CONTINUE_LINKING);
4508
4509 /* The code controlling the sequential loop goes in CONT_BB,
4510 replacing the GIMPLE_OMP_CONTINUE. */
4511 si = gsi_last_bb (cont_bb);
4512 stmt = gsi_stmt (si);
4513 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4514 v_main = gimple_omp_continue_control_use (stmt);
4515 v_back = gimple_omp_continue_control_def (stmt);
4516
4517 if (POINTER_TYPE_P (type))
4518 t = fold_build_pointer_plus (v_main, fd->loop.step);
4519 else
4520 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4521 stmt = gimple_build_assign (v_back, t);
4522 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4523
4524 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4525 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4526
4527 /* Remove GIMPLE_OMP_CONTINUE. */
4528 gsi_remove (&si, true);
4529
4530 /* Trip update code goes into TRIP_UPDATE_BB. */
4531 si = gsi_start_bb (trip_update_bb);
4532
4533 t = build_int_cst (itype, 1);
4534 t = build2 (PLUS_EXPR, itype, trip_main, t);
4535 stmt = gimple_build_assign (trip_back, t);
4536 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4537
4538 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4539 si = gsi_last_bb (exit_bb);
4540 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4541 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4542 false, GSI_SAME_STMT);
4543 gsi_remove (&si, true);
4544
4545 /* Connect the new blocks. */
4546 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4547 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4548
4549 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4550 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4551
4552 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4553
4554 if (gimple_in_ssa_p (cfun))
4555 {
4556 gimple_stmt_iterator psi;
4557 gimple phi;
4558 edge re, ene;
4559 edge_var_map_vector head;
4560 edge_var_map *vm;
4561 size_t i;
4562
4563 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4564 remove arguments of the phi nodes in fin_bb. We need to create
4565 appropriate phi nodes in iter_part_bb instead. */
4566 se = single_pred_edge (fin_bb);
4567 re = single_succ_edge (trip_update_bb);
4568 head = redirect_edge_var_map_vector (re);
4569 ene = single_succ_edge (entry_bb);
4570
4571 psi = gsi_start_phis (fin_bb);
4572 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4573 gsi_next (&psi), ++i)
4574 {
4575 gimple nphi;
4576 source_location locus;
4577
4578 phi = gsi_stmt (psi);
4579 t = gimple_phi_result (phi);
4580 gcc_assert (t == redirect_edge_var_map_result (vm));
4581 nphi = create_phi_node (t, iter_part_bb);
4582 SSA_NAME_DEF_STMT (t) = nphi;
4583
4584 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4585 locus = gimple_phi_arg_location_from_edge (phi, se);
4586
4587 /* A special case -- fd->loop.v is not yet computed in
4588 iter_part_bb; we need to use v_extra instead. */
4589 if (t == fd->loop.v)
4590 t = v_extra;
4591 add_phi_arg (nphi, t, ene, locus);
4592 locus = redirect_edge_var_map_location (vm);
4593 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4594 }
4595 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4596 redirect_edge_var_map_clear (re);
4597 while (1)
4598 {
4599 psi = gsi_start_phis (fin_bb);
4600 if (gsi_end_p (psi))
4601 break;
4602 remove_phi_node (&psi, false);
4603 }
4604
4605 /* Make phi node for trip. */
4606 phi = create_phi_node (trip_main, iter_part_bb);
4607 SSA_NAME_DEF_STMT (trip_main) = phi;
4608 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4609 UNKNOWN_LOCATION);
4610 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4611 UNKNOWN_LOCATION);
4612 }
4613
4614 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4615 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4616 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4617 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4618 recompute_dominator (CDI_DOMINATORS, fin_bb));
4619 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4620 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4621 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4622 recompute_dominator (CDI_DOMINATORS, body_bb));
4623 }
4624
4625
4626 /* Expand the OpenMP loop defined by REGION. */
4627
4628 static void
4629 expand_omp_for (struct omp_region *region)
4630 {
4631 struct omp_for_data fd;
4632 struct omp_for_data_loop *loops;
4633
4634 loops
4635 = (struct omp_for_data_loop *)
4636 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4637 * sizeof (struct omp_for_data_loop));
4638 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4639 region->sched_kind = fd.sched_kind;
4640
4641 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4642 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4643 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4644 if (region->cont)
4645 {
4646 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4647 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4648 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4649 }
4650
4651 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4652 && !fd.have_ordered
4653 && fd.collapse == 1
4654 && region->cont != NULL)
4655 {
4656 if (fd.chunk_size == NULL)
4657 expand_omp_for_static_nochunk (region, &fd);
4658 else
4659 expand_omp_for_static_chunk (region, &fd);
4660 }
4661 else
4662 {
4663 int fn_index, start_ix, next_ix;
4664
4665 if (fd.chunk_size == NULL
4666 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
4667 fd.chunk_size = integer_zero_node;
4668 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4669 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4670 ? 3 : fd.sched_kind;
4671 fn_index += fd.have_ordered * 4;
4672 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
4673 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
4674 if (fd.iter_type == long_long_unsigned_type_node)
4675 {
4676 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4677 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
4678 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4679 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
4680 }
4681 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4682 (enum built_in_function) next_ix);
4683 }
4684
4685 update_ssa (TODO_update_ssa_only_virtuals);
4686 }
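
/* To illustrate the builtin selection above (assuming the usual ordering
   of the GOMP loop builtins in builtins.def): for schedule(dynamic, 4)
   with no ordered clause, fd.sched_kind yields fn_index 1, so start_ix
   and next_ix resolve to GOMP_loop_dynamic_start/_next; an ordered
   clause shifts both by 4 into the *_ordered_* variants, and an
   unsigned long long iteration type shifts them again into the
   GOMP_loop_ull_* range.  */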
4687
4688
4689 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4690
4691 v = GOMP_sections_start (n);
4692 L0:
4693 switch (v)
4694 {
4695 case 0:
4696 goto L2;
4697 case 1:
4698 section 1;
4699 goto L1;
4700 case 2:
4701 ...
4702 case n:
4703 ...
4704 default:
4705 abort ();
4706 }
4707 L1:
4708 v = GOMP_sections_next ();
4709 goto L0;
4710 L2:
4711 reduction;
4712
4713 If this is a combined parallel sections construct, replace the call to
4714 GOMP_sections_start with a call to GOMP_sections_next. */
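
/* For example, a directive such as

     #pragma omp sections
     {
       #pragma omp section
         foo ();
       #pragma omp section
         bar ();
     }

   becomes, roughly, a loop around the switch above in which
   GOMP_sections_start (2) and GOMP_sections_next () return 1 or 2 to run
   foo or bar on whichever thread asks next, and 0 once every section has
   been handed out.  */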
4715
4716 static void
4717 expand_omp_sections (struct omp_region *region)
4718 {
4719 tree t, u, vin = NULL, vmain, vnext, l2;
4720 VEC (tree,heap) *label_vec;
4721 unsigned len;
4722 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4723 gimple_stmt_iterator si, switch_si;
4724 gimple sections_stmt, stmt, cont;
4725 edge_iterator ei;
4726 edge e;
4727 struct omp_region *inner;
4728 unsigned i, casei;
4729 bool exit_reachable = region->cont != NULL;
4730
4731 gcc_assert (exit_reachable == (region->exit != NULL));
4732 entry_bb = region->entry;
4733 l0_bb = single_succ (entry_bb);
4734 l1_bb = region->cont;
4735 l2_bb = region->exit;
4736 if (exit_reachable)
4737 {
4738 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4739 l2 = gimple_block_label (l2_bb);
4740 else
4741 {
4742 /* This can happen if there are reductions. */
4743 len = EDGE_COUNT (l0_bb->succs);
4744 gcc_assert (len > 0);
4745 e = EDGE_SUCC (l0_bb, len - 1);
4746 si = gsi_last_bb (e->dest);
4747 l2 = NULL_TREE;
4748 if (gsi_end_p (si)
4749 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4750 l2 = gimple_block_label (e->dest);
4751 else
4752 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4753 {
4754 si = gsi_last_bb (e->dest);
4755 if (gsi_end_p (si)
4756 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4757 {
4758 l2 = gimple_block_label (e->dest);
4759 break;
4760 }
4761 }
4762 }
4763 default_bb = create_empty_bb (l1_bb->prev_bb);
4764 }
4765 else
4766 {
4767 default_bb = create_empty_bb (l0_bb);
4768 l2 = gimple_block_label (default_bb);
4769 }
4770
4771 /* We will build a switch() with enough cases for all the
4772 GIMPLE_OMP_SECTION regions, a '0' case to signal that there is no
4773 more work, and a default case to abort if something goes wrong. */
4774 len = EDGE_COUNT (l0_bb->succs);
4775
4776 /* Use VEC_quick_push on label_vec throughout, since we know the size
4777 in advance. */
4778 label_vec = VEC_alloc (tree, heap, len);
4779
4780 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4781 GIMPLE_OMP_SECTIONS statement. */
4782 si = gsi_last_bb (entry_bb);
4783 sections_stmt = gsi_stmt (si);
4784 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4785 vin = gimple_omp_sections_control (sections_stmt);
4786 if (!is_combined_parallel (region))
4787 {
4788 /* If we are not inside a combined parallel+sections region,
4789 call GOMP_sections_start. */
4790 t = build_int_cst (unsigned_type_node,
4791 exit_reachable ? len - 1 : len);
4792 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
4793 stmt = gimple_build_call (u, 1, t);
4794 }
4795 else
4796 {
4797 /* Otherwise, call GOMP_sections_next. */
4798 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4799 stmt = gimple_build_call (u, 0);
4800 }
4801 gimple_call_set_lhs (stmt, vin);
4802 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4803 gsi_remove (&si, true);
4804
4805 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4806 L0_BB. */
4807 switch_si = gsi_last_bb (l0_bb);
4808 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4809 if (exit_reachable)
4810 {
4811 cont = last_stmt (l1_bb);
4812 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4813 vmain = gimple_omp_continue_control_use (cont);
4814 vnext = gimple_omp_continue_control_def (cont);
4815 }
4816 else
4817 {
4818 vmain = vin;
4819 vnext = NULL_TREE;
4820 }
4821
4822 i = 0;
4823 if (exit_reachable)
4824 {
4825 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
4826 VEC_quick_push (tree, label_vec, t);
4827 i++;
4828 }
4829
4830 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4831 for (inner = region->inner, casei = 1;
4832 inner;
4833 inner = inner->next, i++, casei++)
4834 {
4835 basic_block s_entry_bb, s_exit_bb;
4836
4837 /* Skip optional reduction region. */
4838 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4839 {
4840 --i;
4841 --casei;
4842 continue;
4843 }
4844
4845 s_entry_bb = inner->entry;
4846 s_exit_bb = inner->exit;
4847
4848 t = gimple_block_label (s_entry_bb);
4849 u = build_int_cst (unsigned_type_node, casei);
4850 u = build_case_label (u, NULL, t);
4851 VEC_quick_push (tree, label_vec, u);
4852
4853 si = gsi_last_bb (s_entry_bb);
4854 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4855 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4856 gsi_remove (&si, true);
4857 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4858
4859 if (s_exit_bb == NULL)
4860 continue;
4861
4862 si = gsi_last_bb (s_exit_bb);
4863 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4864 gsi_remove (&si, true);
4865
4866 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4867 }
4868
4869 /* Error handling code goes in DEFAULT_BB. */
4870 t = gimple_block_label (default_bb);
4871 u = build_case_label (NULL, NULL, t);
4872 make_edge (l0_bb, default_bb, 0);
4873
4874 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4875 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4876 gsi_remove (&switch_si, true);
4877 VEC_free (tree, heap, label_vec);
4878
4879 si = gsi_start_bb (default_bb);
4880 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
4881 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4882
4883 if (exit_reachable)
4884 {
4885 tree bfn_decl;
4886
4887 /* Code to get the next section goes in L1_BB. */
4888 si = gsi_last_bb (l1_bb);
4889 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4890
4891 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4892 stmt = gimple_build_call (bfn_decl, 0);
4893 gimple_call_set_lhs (stmt, vnext);
4894 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4895 gsi_remove (&si, true);
4896
4897 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4898
4899 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4900 si = gsi_last_bb (l2_bb);
4901 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4902 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
4903 else
4904 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
4905 stmt = gimple_build_call (t, 0);
4906 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4907 gsi_remove (&si, true);
4908 }
4909
4910 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4911 }
4912
4913
4914 /* Expand code for an OpenMP single directive. We've already expanded
4915 much of the code; here we simply place the GOMP_barrier call. */
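
/* For example, for

     #pragma omp single copyprivate (x)
       x = init ();

   the copyprivate clause forces the trailing barrier even under nowait,
   because the other threads must not leave the construct before the value
   of x has been broadcast to them.  */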
4916
4917 static void
4918 expand_omp_single (struct omp_region *region)
4919 {
4920 basic_block entry_bb, exit_bb;
4921 gimple_stmt_iterator si;
4922 bool need_barrier = false;
4923
4924 entry_bb = region->entry;
4925 exit_bb = region->exit;
4926
4927 si = gsi_last_bb (entry_bb);
4928 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4929 be removed. We need to ensure that the thread that entered the single
4930 does not exit before the data is copied out by the other threads. */
4931 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4932 OMP_CLAUSE_COPYPRIVATE))
4933 need_barrier = true;
4934 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4935 gsi_remove (&si, true);
4936 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4937
4938 si = gsi_last_bb (exit_bb);
4939 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4940 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4941 false, GSI_SAME_STMT);
4942 gsi_remove (&si, true);
4943 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4944 }
4945
4946
4947 /* Generic expansion for OpenMP synchronization directives: master,
4948 ordered and critical. All we need to do here is remove the entry
4949 and exit markers for REGION. */
4950
4951 static void
4952 expand_omp_synch (struct omp_region *region)
4953 {
4954 basic_block entry_bb, exit_bb;
4955 gimple_stmt_iterator si;
4956
4957 entry_bb = region->entry;
4958 exit_bb = region->exit;
4959
4960 si = gsi_last_bb (entry_bb);
4961 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4962 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4963 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4964 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4965 gsi_remove (&si, true);
4966 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4967
4968 if (exit_bb)
4969 {
4970 si = gsi_last_bb (exit_bb);
4971 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4972 gsi_remove (&si, true);
4973 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4974 }
4975 }
4976
4977 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4978 operation as a normal volatile load. */
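
/* For example, assuming the target provides the builtin for the access
   size at hand,

     #pragma omp atomic read
       v = *p;

   is expanded here into roughly

     v = __atomic_load_8 (p, MEMMODEL_RELAXED);

   with the _8 suffix standing in for whatever size INDEX selects.  */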
4979
4980 static bool
4981 expand_omp_atomic_load (basic_block load_bb, tree addr,
4982 tree loaded_val, int index)
4983 {
4984 enum built_in_function tmpbase;
4985 gimple_stmt_iterator gsi;
4986 basic_block store_bb;
4987 location_t loc;
4988 gimple stmt;
4989 tree decl, call, type, itype;
4990
4991 gsi = gsi_last_bb (load_bb);
4992 stmt = gsi_stmt (gsi);
4993 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
4994 loc = gimple_location (stmt);
4995
4996 /* ??? If the target does not implement atomic_load_optab[mode], and mode
4997 is smaller than word size, then expand_atomic_load assumes that the load
4998 is atomic. We could avoid the builtin entirely in this case. */
4999
5000 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
5001 decl = builtin_decl_explicit (tmpbase);
5002 if (decl == NULL_TREE)
5003 return false;
5004
5005 type = TREE_TYPE (loaded_val);
5006 itype = TREE_TYPE (TREE_TYPE (decl));
5007
5008 call = build_call_expr_loc (loc, decl, 2, addr,
5009 build_int_cst (NULL, MEMMODEL_RELAXED));
5010 if (!useless_type_conversion_p (type, itype))
5011 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5012 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5013
5014 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5015 gsi_remove (&gsi, true);
5016
5017 store_bb = single_succ (load_bb);
5018 gsi = gsi_last_bb (store_bb);
5019 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5020 gsi_remove (&gsi, true);
5021
5022 if (gimple_in_ssa_p (cfun))
5023 update_ssa (TODO_update_ssa_no_phi);
5024
5025 return true;
5026 }
5027
5028 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5029 operation as a normal volatile store. */
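
/* Likewise, for something along the lines of

     #pragma omp atomic write
       *p = v;

   the store side becomes roughly __atomic_store_N (p, v, MEMMODEL_RELAXED),
   while a capture form that still needs the previous value takes the
   __atomic_exchange_N path chosen below instead.  */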
5030
5031 static bool
5032 expand_omp_atomic_store (basic_block load_bb, tree addr,
5033 tree loaded_val, tree stored_val, int index)
5034 {
5035 enum built_in_function tmpbase;
5036 gimple_stmt_iterator gsi;
5037 basic_block store_bb = single_succ (load_bb);
5038 location_t loc;
5039 gimple stmt;
5040 tree decl, call, type, itype;
5041 enum machine_mode imode;
5042 bool exchange;
5043
5044 gsi = gsi_last_bb (load_bb);
5045 stmt = gsi_stmt (gsi);
5046 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
5047
5048 /* If the load value is needed, then this isn't a store but an exchange. */
5049 exchange = gimple_omp_atomic_need_value_p (stmt);
5050
5051 gsi = gsi_last_bb (store_bb);
5052 stmt = gsi_stmt (gsi);
5053 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
5054 loc = gimple_location (stmt);
5055
5056 /* ??? If the target does not implement atomic_store_optab[mode], and mode
5057 is smaller than word size, then expand_atomic_store assumes that the store
5058 is atomic. We could avoid the builtin entirely in this case. */
5059
5060 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
5061 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
5062 decl = builtin_decl_explicit (tmpbase);
5063 if (decl == NULL_TREE)
5064 return false;
5065
5066 type = TREE_TYPE (stored_val);
5067
5068 /* Dig out the type of the function's second argument. */
5069 itype = TREE_TYPE (decl);
5070 itype = TYPE_ARG_TYPES (itype);
5071 itype = TREE_CHAIN (itype);
5072 itype = TREE_VALUE (itype);
5073 imode = TYPE_MODE (itype);
5074
5075 if (exchange && !can_atomic_exchange_p (imode, true))
5076 return false;
5077
5078 if (!useless_type_conversion_p (itype, type))
5079 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
5080 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
5081 build_int_cst (NULL, MEMMODEL_RELAXED));
5082 if (exchange)
5083 {
5084 if (!useless_type_conversion_p (type, itype))
5085 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5086 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5087 }
5088
5089 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5090 gsi_remove (&gsi, true);
5091
5092 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
5093 gsi = gsi_last_bb (load_bb);
5094 gsi_remove (&gsi, true);
5095
5096 if (gimple_in_ssa_p (cfun))
5097 update_ssa (TODO_update_ssa_no_phi);
5098
5099 return true;
5100 }
5101
5102 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
5103 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
5104 size of the data type, and thus usable to find the index of the builtin
5105 decl. Returns false if the expression is not of the proper form. */
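
/* For example, assuming the target provides the corresponding builtin,

     #pragma omp atomic
       x += n;

   matches the PLUS_EXPR case below and is expanded into roughly

     __atomic_fetch_add_4 (&x, n, MEMMODEL_RELAXED);

   with the _4 suffix again standing in for the size selected by INDEX.  */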
5106
5107 static bool
5108 expand_omp_atomic_fetch_op (basic_block load_bb,
5109 tree addr, tree loaded_val,
5110 tree stored_val, int index)
5111 {
5112 enum built_in_function oldbase, newbase, tmpbase;
5113 tree decl, itype, call;
5114 tree lhs, rhs;
5115 basic_block store_bb = single_succ (load_bb);
5116 gimple_stmt_iterator gsi;
5117 gimple stmt;
5118 location_t loc;
5119 enum tree_code code;
5120 bool need_old, need_new;
5121 enum machine_mode imode;
5122
5123 /* We expect to find the following sequences:
5124
5125 load_bb:
5126 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
5127
5128 store_bb:
5129 val = tmp OP something; (or: something OP tmp)
5130 GIMPLE_OMP_ATOMIC_STORE (val)
5131
5132 ???FIXME: Allow a more flexible sequence.
5133 Perhaps use data flow to pick the statements.
5134
5135 */
5136
5137 gsi = gsi_after_labels (store_bb);
5138 stmt = gsi_stmt (gsi);
5139 loc = gimple_location (stmt);
5140 if (!is_gimple_assign (stmt))
5141 return false;
5142 gsi_next (&gsi);
5143 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
5144 return false;
5145 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
5146 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
5147 gcc_checking_assert (!need_old || !need_new);
5148
5149 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
5150 return false;
5151
5152 /* Check for one of the supported fetch-op operations. */
5153 code = gimple_assign_rhs_code (stmt);
5154 switch (code)
5155 {
5156 case PLUS_EXPR:
5157 case POINTER_PLUS_EXPR:
5158 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
5159 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
5160 break;
5161 case MINUS_EXPR:
5162 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
5163 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
5164 break;
5165 case BIT_AND_EXPR:
5166 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
5167 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
5168 break;
5169 case BIT_IOR_EXPR:
5170 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
5171 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
5172 break;
5173 case BIT_XOR_EXPR:
5174 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
5175 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
5176 break;
5177 default:
5178 return false;
5179 }
5180
5181 /* Make sure the expression is of the proper form. */
5182 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5183 rhs = gimple_assign_rhs2 (stmt);
5184 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5185 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5186 rhs = gimple_assign_rhs1 (stmt);
5187 else
5188 return false;
5189
5190 tmpbase = ((enum built_in_function)
5191 ((need_new ? newbase : oldbase) + index + 1));
5192 decl = builtin_decl_explicit (tmpbase);
5193 if (decl == NULL_TREE)
5194 return false;
5195 itype = TREE_TYPE (TREE_TYPE (decl));
5196 imode = TYPE_MODE (itype);
5197
5198 /* We could test all of the various optabs involved, but the fact of the
5199 matter is that (with the exception of i486 vs i586 and xadd) all targets
5200 that support any atomic operation optab also implement compare-and-swap.
5201 Let optabs.c take care of expanding any compare-and-swap loop. */
5202 if (!can_compare_and_swap_p (imode, true))
5203 return false;
5204
5205 gsi = gsi_last_bb (load_bb);
5206 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5207
5208 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
5209 It only requires that the operation happen atomically. Thus we can
5210 use the RELAXED memory model. */
5211 call = build_call_expr_loc (loc, decl, 3, addr,
5212 fold_convert_loc (loc, itype, rhs),
5213 build_int_cst (NULL, MEMMODEL_RELAXED));
5214
5215 if (need_old || need_new)
5216 {
5217 lhs = need_old ? loaded_val : stored_val;
5218 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
5219 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
5220 }
5221 else
5222 call = fold_convert_loc (loc, void_type_node, call);
5223 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5224 gsi_remove (&gsi, true);
5225
5226 gsi = gsi_last_bb (store_bb);
5227 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5228 gsi_remove (&gsi, true);
5229 gsi = gsi_last_bb (store_bb);
5230 gsi_remove (&gsi, true);
5231
5232 if (gimple_in_ssa_p (cfun))
5233 update_ssa (TODO_update_ssa_no_phi);
5234
5235 return true;
5236 }
5237
5238 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5239
5240 oldval = *addr;
5241 repeat:
5242 newval = rhs; // with oldval replacing *addr in rhs
5243 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5244 if (oldval != newval)
5245 goto repeat;
5246
5247 INDEX is log2 of the size of the data type, and thus usable to find the
5248 index of the builtin decl. */
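
/* For a floating point update such as

     #pragma omp atomic
       d *= 2.0;

   no fetch-op builtin applies, so the loop above is used: *addr is
   view-converted to a same-sized integer, the multiplied value is
   view-converted back to that integer type, and the compare-and-swap
   retries until no other thread has changed *addr in the meantime.  */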
5249
5250 static bool
5251 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5252 tree addr, tree loaded_val, tree stored_val,
5253 int index)
5254 {
5255 tree loadedi, storedi, initial, new_storedi, old_vali;
5256 tree type, itype, cmpxchg, iaddr;
5257 gimple_stmt_iterator si;
5258 basic_block loop_header = single_succ (load_bb);
5259 gimple phi, stmt;
5260 edge e;
5261 enum built_in_function fncode;
5262
5263 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
5264 order to use the RELAXED memory model effectively. */
5265 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
5266 + index + 1);
5267 cmpxchg = builtin_decl_explicit (fncode);
5268 if (cmpxchg == NULL_TREE)
5269 return false;
5270 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5271 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5272
5273 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
5274 return false;
5275
5276 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5277 si = gsi_last_bb (load_bb);
5278 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5279
5280 /* For floating-point values, we'll need to view-convert them to integers
5281 so that we can perform the atomic compare and swap. Simplify the
5282 following code by always setting up the "i"ntegral variables. */
5283 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5284 {
5285 tree iaddr_val;
5286
5287 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5288 true), NULL);
5289 iaddr_val
5290 = force_gimple_operand_gsi (&si,
5291 fold_convert (TREE_TYPE (iaddr), addr),
5292 false, NULL_TREE, true, GSI_SAME_STMT);
5293 stmt = gimple_build_assign (iaddr, iaddr_val);
5294 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5295 loadedi = create_tmp_var (itype, NULL);
5296 if (gimple_in_ssa_p (cfun))
5297 {
5298 add_referenced_var (iaddr);
5299 add_referenced_var (loadedi);
5300 loadedi = make_ssa_name (loadedi, NULL);
5301 }
5302 }
5303 else
5304 {
5305 iaddr = addr;
5306 loadedi = loaded_val;
5307 }
5308
5309 initial
5310 = force_gimple_operand_gsi (&si,
5311 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5312 iaddr,
5313 build_int_cst (TREE_TYPE (iaddr), 0)),
5314 true, NULL_TREE, true, GSI_SAME_STMT);
5315
5316 /* Move the value to the LOADEDI temporary. */
5317 if (gimple_in_ssa_p (cfun))
5318 {
5319 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5320 phi = create_phi_node (loadedi, loop_header);
5321 SSA_NAME_DEF_STMT (loadedi) = phi;
5322 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5323 initial);
5324 }
5325 else
5326 gsi_insert_before (&si,
5327 gimple_build_assign (loadedi, initial),
5328 GSI_SAME_STMT);
5329 if (loadedi != loaded_val)
5330 {
5331 gimple_stmt_iterator gsi2;
5332 tree x;
5333
5334 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5335 gsi2 = gsi_start_bb (loop_header);
5336 if (gimple_in_ssa_p (cfun))
5337 {
5338 gimple stmt;
5339 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5340 true, GSI_SAME_STMT);
5341 stmt = gimple_build_assign (loaded_val, x);
5342 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5343 }
5344 else
5345 {
5346 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5347 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5348 true, GSI_SAME_STMT);
5349 }
5350 }
5351 gsi_remove (&si, true);
5352
5353 si = gsi_last_bb (store_bb);
5354 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5355
5356 if (iaddr == addr)
5357 storedi = stored_val;
5358 else
5359 storedi =
5360 force_gimple_operand_gsi (&si,
5361 build1 (VIEW_CONVERT_EXPR, itype,
5362 stored_val), true, NULL_TREE, true,
5363 GSI_SAME_STMT);
5364
5365 /* Build the compare&swap statement. */
5366 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5367 new_storedi = force_gimple_operand_gsi (&si,
5368 fold_convert (TREE_TYPE (loadedi),
5369 new_storedi),
5370 true, NULL_TREE,
5371 true, GSI_SAME_STMT);
5372
5373 if (gimple_in_ssa_p (cfun))
5374 old_vali = loadedi;
5375 else
5376 {
5377 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5378 if (gimple_in_ssa_p (cfun))
5379 add_referenced_var (old_vali);
5380 stmt = gimple_build_assign (old_vali, loadedi);
5381 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5382
5383 stmt = gimple_build_assign (loadedi, new_storedi);
5384 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5385 }
5386
5387 /* Note that we always perform the comparison as an integer, even for
5388 floating point. This allows the atomic operation to properly
5389 succeed even with NaNs and -0.0. */
5390 stmt = gimple_build_cond_empty
5391 (build2 (NE_EXPR, boolean_type_node,
5392 new_storedi, old_vali));
5393 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5394
5395 /* Update cfg. */
5396 e = single_succ_edge (store_bb);
5397 e->flags &= ~EDGE_FALLTHRU;
5398 e->flags |= EDGE_FALSE_VALUE;
5399
5400 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5401
5402 /* Copy the new value to loadedi (we already did that before the condition
5403 if we are not in SSA). */
5404 if (gimple_in_ssa_p (cfun))
5405 {
5406 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5407 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5408 }
5409
5410 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5411 gsi_remove (&si, true);
5412
5413 if (gimple_in_ssa_p (cfun))
5414 update_ssa (TODO_update_ssa_no_phi);
5415
5416 return true;
5417 }
5418
5419 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5420
5421 GOMP_atomic_start ();
5422 *addr = rhs;
5423 GOMP_atomic_end ();
5424
5425 The result is not globally atomic, but works so long as all parallel
5426 references are within #pragma omp atomic directives. According to
5427      responses received from omp@openmp.org, this appears to be within spec.
5428      That makes sense, since that's how several other compilers handle
5429 this situation as well.
5430 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5431 expanding. STORED_VAL is the operand of the matching
5432 GIMPLE_OMP_ATOMIC_STORE.
5433
5434 We replace
5435 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5436 loaded_val = *addr;
5437
5438 and replace
5439 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5440 *addr = stored_val;
5441 */
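
/* Illustration (editorial sketch, not code emitted verbatim): assuming this
   fallback is chosen for a double X,

	#pragma omp atomic
	x += expr;

   becomes roughly

	GOMP_atomic_start ();
	loaded_val = x;
	stored_val = loaded_val + expr;
	x = stored_val;
	GOMP_atomic_end ();  */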
5442
5443 static bool
5444 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5445 tree addr, tree loaded_val, tree stored_val)
5446 {
5447 gimple_stmt_iterator si;
5448 gimple stmt;
5449 tree t;
5450
5451 si = gsi_last_bb (load_bb);
5452 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5453
5454 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
5455 t = build_call_expr (t, 0);
5456 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5457
5458 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5459 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5460 gsi_remove (&si, true);
5461
5462 si = gsi_last_bb (store_bb);
5463 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5464
5465 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5466 stored_val);
5467 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5468
5469 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
5470 t = build_call_expr (t, 0);
5471 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5472 gsi_remove (&si, true);
5473
5474 if (gimple_in_ssa_p (cfun))
5475 update_ssa (TODO_update_ssa_no_phi);
5476 return true;
5477 }
5478
5479 /* Expand a GIMPLE_OMP_ATOMIC statement.  For plain atomic reads and
5480    writes we first try expand_omp_atomic_load / expand_omp_atomic_store;
5481    otherwise we try expand_omp_atomic_fetch_op.  If that fails, we try
5482    expand_omp_atomic_pipeline, and if that fails too, the ultimate
5483    fallback is wrapping the operation in a mutex (expand_omp_atomic_mutex).
5484    REGION is the atomic region built by build_omp_regions_1 ().  */
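
/* Editorial sketch of the fallback chain, assuming a naturally aligned
   4-byte int X and "#pragma omp atomic  x += 1;":

     expand_omp_atomic_fetch_op: a single fetch-and-add builtin, the
       equivalent of __sync_fetch_and_add (&x, 1);

     expand_omp_atomic_pipeline: a compare-and-swap loop,
       old = x;
       while ((cur = __sync_val_compare_and_swap (&x, old, old + 1)) != old)
	 old = cur;

     expand_omp_atomic_mutex:
       GOMP_atomic_start ();  x = x + 1;  GOMP_atomic_end ();  */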
5485
5486 static void
5487 expand_omp_atomic (struct omp_region *region)
5488 {
5489 basic_block load_bb = region->entry, store_bb = region->exit;
5490 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5491 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5492 tree addr = gimple_omp_atomic_load_rhs (load);
5493 tree stored_val = gimple_omp_atomic_store_val (store);
5494 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5495 HOST_WIDE_INT index;
5496
5497   /* Make sure the type size is a supported power of two (1 to 16 bytes).  */
5498 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5499 index = exact_log2 (index);
5500 if (index >= 0 && index <= 4)
5501 {
5502 unsigned int align = TYPE_ALIGN_UNIT (type);
5503
5504 /* __sync builtins require strict data alignment. */
5505 if (exact_log2 (align) >= index)
5506 {
5507 /* Atomic load. */
5508 if (loaded_val == stored_val
5509 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5510 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5511 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5512 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
5513 return;
5514
5515 /* Atomic store. */
5516 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5517 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5518 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5519 && store_bb == single_succ (load_bb)
5520 && first_stmt (store_bb) == store
5521 && expand_omp_atomic_store (load_bb, addr, loaded_val,
5522 stored_val, index))
5523 return;
5524
5525 /* When possible, use specialized atomic update functions. */
5526 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5527 && store_bb == single_succ (load_bb)
5528 && expand_omp_atomic_fetch_op (load_bb, addr,
5529 loaded_val, stored_val, index))
5530 return;
5531
5532	  /* If we don't have specialized __sync builtins, try to implement
5533	     it as a compare and swap loop.  */
5534 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5535 loaded_val, stored_val, index))
5536 return;
5537 }
5538 }
5539
5540 /* The ultimate fallback is wrapping the operation in a mutex. */
5541 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5542 }
5543
5544
5545 /* Expand the parallel region tree rooted at REGION. Expansion
5546 proceeds in depth-first order. Innermost regions are expanded
5547 first. This way, parallel regions that require a new function to
5548 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5549 internal dependencies in their body. */
5550
5551 static void
5552 expand_omp (struct omp_region *region)
5553 {
5554 while (region)
5555 {
5556 location_t saved_location;
5557
5558 /* First, determine whether this is a combined parallel+workshare
5559 region. */
5560 if (region->type == GIMPLE_OMP_PARALLEL)
5561 determine_parallel_type (region);
5562
5563 if (region->inner)
5564 expand_omp (region->inner);
5565
5566 saved_location = input_location;
5567 if (gimple_has_location (last_stmt (region->entry)))
5568 input_location = gimple_location (last_stmt (region->entry));
5569
5570 switch (region->type)
5571 {
5572 case GIMPLE_OMP_PARALLEL:
5573 case GIMPLE_OMP_TASK:
5574 expand_omp_taskreg (region);
5575 break;
5576
5577 case GIMPLE_OMP_FOR:
5578 expand_omp_for (region);
5579 break;
5580
5581 case GIMPLE_OMP_SECTIONS:
5582 expand_omp_sections (region);
5583 break;
5584
5585 case GIMPLE_OMP_SECTION:
5586 /* Individual omp sections are handled together with their
5587 parent GIMPLE_OMP_SECTIONS region. */
5588 break;
5589
5590 case GIMPLE_OMP_SINGLE:
5591 expand_omp_single (region);
5592 break;
5593
5594 case GIMPLE_OMP_MASTER:
5595 case GIMPLE_OMP_ORDERED:
5596 case GIMPLE_OMP_CRITICAL:
5597 expand_omp_synch (region);
5598 break;
5599
5600 case GIMPLE_OMP_ATOMIC_LOAD:
5601 expand_omp_atomic (region);
5602 break;
5603
5604 default:
5605 gcc_unreachable ();
5606 }
5607
5608 input_location = saved_location;
5609 region = region->next;
5610 }
5611 }
5612
5613
5614 /* Helper for build_omp_regions. Scan the dominator tree starting at
5615 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5616    true, the function ends once a single tree is built (otherwise, the
5617    whole forest of OMP constructs may be built).  */
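
/* Editorial example: for

     #pragma omp parallel
     {
       #pragma omp for
       for (...) ...
     }

   this builds a GIMPLE_OMP_PARALLEL region whose ->inner is the
   GIMPLE_OMP_FOR region; each region's ->exit is the block holding the
   matching GIMPLE_OMP_RETURN.  */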
5618
5619 static void
5620 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5621 bool single_tree)
5622 {
5623 gimple_stmt_iterator gsi;
5624 gimple stmt;
5625 basic_block son;
5626
5627 gsi = gsi_last_bb (bb);
5628 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5629 {
5630 struct omp_region *region;
5631 enum gimple_code code;
5632
5633 stmt = gsi_stmt (gsi);
5634 code = gimple_code (stmt);
5635 if (code == GIMPLE_OMP_RETURN)
5636 {
5637 /* STMT is the return point out of region PARENT. Mark it
5638 as the exit point and make PARENT the immediately
5639 enclosing region. */
5640 gcc_assert (parent);
5641 region = parent;
5642 region->exit = bb;
5643 parent = parent->outer;
5644 }
5645 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5646 {
5647	  /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5648 GIMPLE_OMP_RETURN, but matches with
5649 GIMPLE_OMP_ATOMIC_LOAD. */
5650 gcc_assert (parent);
5651 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5652 region = parent;
5653 region->exit = bb;
5654 parent = parent->outer;
5655 }
5656
5657 else if (code == GIMPLE_OMP_CONTINUE)
5658 {
5659 gcc_assert (parent);
5660 parent->cont = bb;
5661 }
5662 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5663 {
5664 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5665 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5666 ;
5667 }
5668 else
5669 {
5670 /* Otherwise, this directive becomes the parent for a new
5671 region. */
5672 region = new_omp_region (bb, code, parent);
5673 parent = region;
5674 }
5675 }
5676
5677 if (single_tree && !parent)
5678 return;
5679
5680 for (son = first_dom_son (CDI_DOMINATORS, bb);
5681 son;
5682 son = next_dom_son (CDI_DOMINATORS, son))
5683 build_omp_regions_1 (son, parent, single_tree);
5684 }
5685
5686 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5687 root_omp_region. */
5688
5689 static void
5690 build_omp_regions_root (basic_block root)
5691 {
5692 gcc_assert (root_omp_region == NULL);
5693 build_omp_regions_1 (root, NULL, true);
5694 gcc_assert (root_omp_region != NULL);
5695 }
5696
5697 /* Expands the omp construct (and its subconstructs) starting in HEAD.  */
5698
5699 void
5700 omp_expand_local (basic_block head)
5701 {
5702 build_omp_regions_root (head);
5703 if (dump_file && (dump_flags & TDF_DETAILS))
5704 {
5705 fprintf (dump_file, "\nOMP region tree\n\n");
5706 dump_omp_region (dump_file, root_omp_region, 0);
5707 fprintf (dump_file, "\n");
5708 }
5709
5710 remove_exit_barriers (root_omp_region);
5711 expand_omp (root_omp_region);
5712
5713 free_omp_regions ();
5714 }
5715
5716 /* Scan the CFG and build a tree of OMP regions.  The resulting tree is
5717    stored in root_omp_region.  */
5718
5719 static void
5720 build_omp_regions (void)
5721 {
5722 gcc_assert (root_omp_region == NULL);
5723 calculate_dominance_info (CDI_DOMINATORS);
5724 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5725 }
5726
5727 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5728
5729 static unsigned int
5730 execute_expand_omp (void)
5731 {
5732 build_omp_regions ();
5733
5734 if (!root_omp_region)
5735 return 0;
5736
5737 if (dump_file)
5738 {
5739 fprintf (dump_file, "\nOMP region tree\n\n");
5740 dump_omp_region (dump_file, root_omp_region, 0);
5741 fprintf (dump_file, "\n");
5742 }
5743
5744 remove_exit_barriers (root_omp_region);
5745
5746 expand_omp (root_omp_region);
5747
5748 cleanup_tree_cfg ();
5749
5750 free_omp_regions ();
5751
5752 return 0;
5753 }
5754
5755 /* OMP expansion -- the default pass, run before creation of SSA form. */
5756
5757 static bool
5758 gate_expand_omp (void)
5759 {
5760 return (flag_openmp != 0 && !seen_error ());
5761 }
5762
5763 struct gimple_opt_pass pass_expand_omp =
5764 {
5765 {
5766 GIMPLE_PASS,
5767 "ompexp", /* name */
5768 gate_expand_omp, /* gate */
5769 execute_expand_omp, /* execute */
5770 NULL, /* sub */
5771 NULL, /* next */
5772 0, /* static_pass_number */
5773 TV_NONE, /* tv_id */
5774 PROP_gimple_any, /* properties_required */
5775 0, /* properties_provided */
5776 0, /* properties_destroyed */
5777 0, /* todo_flags_start */
5778 0 /* todo_flags_finish */
5779 }
5780 };
5781 \f
5782 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5783
5784 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5785 CTX is the enclosing OMP context for the current statement. */
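
/* Editorial sketch of the statement sequence produced below:

     <private/firstprivate setup>			(ilist)
     GIMPLE_OMP_SECTIONS <clauses, control var .section>
     GIMPLE_OMP_SECTIONS_SWITCH
     bind { GIMPLE_OMP_SECTION <body> GIMPLE_OMP_RETURN ... }
	    (one per section; lastprivate code added to the last one)
     GIMPLE_OMP_CONTINUE (.section, .section)
     <reduction code>					(olist)
     <destructors>					(dlist)
     GIMPLE_OMP_RETURN <nowait?>  */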
5786
5787 static void
5788 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5789 {
5790 tree block, control;
5791 gimple_stmt_iterator tgsi;
5792 unsigned i, len;
5793 gimple stmt, new_stmt, bind, t;
5794 gimple_seq ilist, dlist, olist, new_body, body;
5795 struct gimplify_ctx gctx;
5796
5797 stmt = gsi_stmt (*gsi_p);
5798
5799 push_gimplify_context (&gctx);
5800
5801 dlist = NULL;
5802 ilist = NULL;
5803 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5804 &ilist, &dlist, ctx);
5805
5806 tgsi = gsi_start (gimple_omp_body (stmt));
5807 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5808 continue;
5809
5810 tgsi = gsi_start (gimple_omp_body (stmt));
5811 body = NULL;
5812 for (i = 0; i < len; i++, gsi_next (&tgsi))
5813 {
5814 omp_context *sctx;
5815 gimple sec_start;
5816
5817 sec_start = gsi_stmt (tgsi);
5818 sctx = maybe_lookup_ctx (sec_start);
5819 gcc_assert (sctx);
5820
5821 gimple_seq_add_stmt (&body, sec_start);
5822
5823 lower_omp (gimple_omp_body (sec_start), sctx);
5824 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5825 gimple_omp_set_body (sec_start, NULL);
5826
5827 if (i == len - 1)
5828 {
5829 gimple_seq l = NULL;
5830 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5831 &l, ctx);
5832 gimple_seq_add_seq (&body, l);
5833 gimple_omp_section_set_last (sec_start);
5834 }
5835
5836 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5837 }
5838
5839 block = make_node (BLOCK);
5840 bind = gimple_build_bind (NULL, body, block);
5841
5842 olist = NULL;
5843 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5844
5845 block = make_node (BLOCK);
5846 new_stmt = gimple_build_bind (NULL, NULL, block);
5847
5848 pop_gimplify_context (new_stmt);
5849 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5850 BLOCK_VARS (block) = gimple_bind_vars (bind);
5851 if (BLOCK_VARS (block))
5852 TREE_USED (block) = 1;
5853
5854 new_body = NULL;
5855 gimple_seq_add_seq (&new_body, ilist);
5856 gimple_seq_add_stmt (&new_body, stmt);
5857 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5858 gimple_seq_add_stmt (&new_body, bind);
5859
5860 control = create_tmp_var (unsigned_type_node, ".section");
5861 t = gimple_build_omp_continue (control, control);
5862 gimple_omp_sections_set_control (stmt, control);
5863 gimple_seq_add_stmt (&new_body, t);
5864
5865 gimple_seq_add_seq (&new_body, olist);
5866 gimple_seq_add_seq (&new_body, dlist);
5867
5868 new_body = maybe_catch_exception (new_body);
5869
5870 t = gimple_build_omp_return
5871 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5872 OMP_CLAUSE_NOWAIT));
5873 gimple_seq_add_stmt (&new_body, t);
5874
5875 gimple_bind_set_body (new_stmt, new_body);
5876 gimple_omp_set_body (stmt, NULL);
5877
5878 gsi_replace (gsi_p, new_stmt, true);
5879 }
5880
5881
5882 /* A subroutine of lower_omp_single. Expand the simple form of
5883 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5884
5885 if (GOMP_single_start ())
5886 BODY;
5887 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5888
5889 FIXME. It may be better to delay expanding the logic of this until
5890    pass_expand_omp.  The expanded logic may make the job more difficult
5891    for a synchronization analysis pass.  */
5892
5893 static void
5894 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5895 {
5896 location_t loc = gimple_location (single_stmt);
5897 tree tlabel = create_artificial_label (loc);
5898 tree flabel = create_artificial_label (loc);
5899 gimple call, cond;
5900 tree lhs, decl;
5901
5902 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
5903 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5904 call = gimple_build_call (decl, 0);
5905 gimple_call_set_lhs (call, lhs);
5906 gimple_seq_add_stmt (pre_p, call);
5907
5908 cond = gimple_build_cond (EQ_EXPR, lhs,
5909 fold_convert_loc (loc, TREE_TYPE (lhs),
5910 boolean_true_node),
5911 tlabel, flabel);
5912 gimple_seq_add_stmt (pre_p, cond);
5913 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5914 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5915 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5916 }
5917
5918
5919 /* A subroutine of lower_omp_single.  Expand the copyprivate form of
5920    a GIMPLE_OMP_SINGLE, i.e. one with a copyprivate clause:
5921
5922 #pragma omp single copyprivate (a, b, c)
5923
5924 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5925
5926 {
5927 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5928 {
5929 BODY;
5930 copyout.a = a;
5931 copyout.b = b;
5932 copyout.c = c;
5933 GOMP_single_copy_end (&copyout);
5934 }
5935 else
5936 {
5937 a = copyout_p->a;
5938 b = copyout_p->b;
5939 c = copyout_p->c;
5940 }
5941 GOMP_barrier ();
5942 }
5943
5944 FIXME. It may be better to delay expanding the logic of this until
5945    pass_expand_omp.  The expanded logic may make the job more difficult
5946    for a synchronization analysis pass.  */
5947
5948 static void
5949 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5950 {
5951 tree ptr_type, t, l0, l1, l2, bfn_decl;
5952 gimple_seq copyin_seq;
5953 location_t loc = gimple_location (single_stmt);
5954
5955 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5956
5957 ptr_type = build_pointer_type (ctx->record_type);
5958 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5959
5960 l0 = create_artificial_label (loc);
5961 l1 = create_artificial_label (loc);
5962 l2 = create_artificial_label (loc);
5963
5964 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
5965 t = build_call_expr_loc (loc, bfn_decl, 0);
5966 t = fold_convert_loc (loc, ptr_type, t);
5967 gimplify_assign (ctx->receiver_decl, t, pre_p);
5968
5969 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5970 build_int_cst (ptr_type, 0));
5971 t = build3 (COND_EXPR, void_type_node, t,
5972 build_and_jump (&l0), build_and_jump (&l1));
5973 gimplify_and_add (t, pre_p);
5974
5975 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5976
5977 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5978
5979 copyin_seq = NULL;
5980 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5981 &copyin_seq, ctx);
5982
5983 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5984 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
5985 t = build_call_expr_loc (loc, bfn_decl, 1, t);
5986 gimplify_and_add (t, pre_p);
5987
5988 t = build_and_jump (&l2);
5989 gimplify_and_add (t, pre_p);
5990
5991 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5992
5993 gimple_seq_add_seq (pre_p, copyin_seq);
5994
5995 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5996 }
5997
5998
5999 /* Expand code for an OpenMP single directive. */
6000
6001 static void
6002 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6003 {
6004 tree block;
6005 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
6006 gimple_seq bind_body, dlist;
6007 struct gimplify_ctx gctx;
6008
6009 push_gimplify_context (&gctx);
6010
6011 bind_body = NULL;
6012 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
6013 &bind_body, &dlist, ctx);
6014 lower_omp (gimple_omp_body (single_stmt), ctx);
6015
6016 gimple_seq_add_stmt (&bind_body, single_stmt);
6017
6018 if (ctx->record_type)
6019 lower_omp_single_copy (single_stmt, &bind_body, ctx);
6020 else
6021 lower_omp_single_simple (single_stmt, &bind_body);
6022
6023 gimple_omp_set_body (single_stmt, NULL);
6024
6025 gimple_seq_add_seq (&bind_body, dlist);
6026
6027 bind_body = maybe_catch_exception (bind_body);
6028
6029 t = gimple_build_omp_return
6030 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
6031 OMP_CLAUSE_NOWAIT));
6032 gimple_seq_add_stmt (&bind_body, t);
6033
6034 block = make_node (BLOCK);
6035 bind = gimple_build_bind (NULL, bind_body, block);
6036
6037 pop_gimplify_context (bind);
6038
6039 gimple_bind_append_vars (bind, ctx->block_vars);
6040 BLOCK_VARS (block) = ctx->block_vars;
6041 gsi_replace (gsi_p, bind, true);
6042 if (BLOCK_VARS (block))
6043 TREE_USED (block) = 1;
6044 }
6045
6046
6047 /* Expand code for an OpenMP master directive. */
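
/* Editorial sketch of the lowering done below:

     if (omp_get_thread_num () != 0)
       goto lab;
     BODY;
   lab:

   There is no barrier at the end; the GIMPLE_OMP_RETURN is built with
   nowait set.  */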
6048
6049 static void
6050 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6051 {
6052 tree block, lab = NULL, x, bfn_decl;
6053 gimple stmt = gsi_stmt (*gsi_p), bind;
6054 location_t loc = gimple_location (stmt);
6055 gimple_seq tseq;
6056 struct gimplify_ctx gctx;
6057
6058 push_gimplify_context (&gctx);
6059
6060 block = make_node (BLOCK);
6061 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
6062 block);
6063
6064 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
6065 x = build_call_expr_loc (loc, bfn_decl, 0);
6066 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
6067 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
6068 tseq = NULL;
6069 gimplify_and_add (x, &tseq);
6070 gimple_bind_add_seq (bind, tseq);
6071
6072 lower_omp (gimple_omp_body (stmt), ctx);
6073 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6074 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6075 gimple_omp_set_body (stmt, NULL);
6076
6077 gimple_bind_add_stmt (bind, gimple_build_label (lab));
6078
6079 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6080
6081 pop_gimplify_context (bind);
6082
6083 gimple_bind_append_vars (bind, ctx->block_vars);
6084 BLOCK_VARS (block) = ctx->block_vars;
6085 gsi_replace (gsi_p, bind, true);
6086 }
6087
6088
6089 /* Expand code for an OpenMP ordered directive. */
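
/* Editorial sketch: the body is bracketed by the libgomp calls

     GOMP_ordered_start ();
     BODY;
     GOMP_ordered_end ();

   followed by a GIMPLE_OMP_RETURN with nowait set.  */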
6090
6091 static void
6092 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6093 {
6094 tree block;
6095 gimple stmt = gsi_stmt (*gsi_p), bind, x;
6096 struct gimplify_ctx gctx;
6097
6098 push_gimplify_context (&gctx);
6099
6100 block = make_node (BLOCK);
6101 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
6102 block);
6103
6104 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
6105 0);
6106 gimple_bind_add_stmt (bind, x);
6107
6108 lower_omp (gimple_omp_body (stmt), ctx);
6109 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6110 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6111 gimple_omp_set_body (stmt, NULL);
6112
6113 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
6114 gimple_bind_add_stmt (bind, x);
6115
6116 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6117
6118 pop_gimplify_context (bind);
6119
6120 gimple_bind_append_vars (bind, ctx->block_vars);
6121 BLOCK_VARS (block) = gimple_bind_vars (bind);
6122 gsi_replace (gsi_p, bind, true);
6123 }
6124
6125
6126 /* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
6127    substitution of a couple of function calls.  But the NAMED case
6128    requires that the languages coordinate a symbol name.  It is therefore
6129    best put here in common code.  */
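
/* Editorial sketch: an unnamed critical becomes

     GOMP_critical_start ();  BODY;  GOMP_critical_end ();

   while "#pragma omp critical (foo)" locks a common symbol shared by all
   translation units:

     GOMP_critical_name_start (&.gomp_critical_user_foo);
     BODY;
     GOMP_critical_name_end (&.gomp_critical_user_foo);  */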
6130
6131 static GTY((param1_is (tree), param2_is (tree)))
6132 splay_tree critical_name_mutexes;
6133
6134 static void
6135 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6136 {
6137 tree block;
6138 tree name, lock, unlock;
6139 gimple stmt = gsi_stmt (*gsi_p), bind;
6140 location_t loc = gimple_location (stmt);
6141 gimple_seq tbody;
6142 struct gimplify_ctx gctx;
6143
6144 name = gimple_omp_critical_name (stmt);
6145 if (name)
6146 {
6147 tree decl;
6148 splay_tree_node n;
6149
6150 if (!critical_name_mutexes)
6151 critical_name_mutexes
6152 = splay_tree_new_ggc (splay_tree_compare_pointers,
6153 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
6154 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
6155
6156 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
6157 if (n == NULL)
6158 {
6159 char *new_str;
6160
6161 decl = create_tmp_var_raw (ptr_type_node, NULL);
6162
6163 new_str = ACONCAT ((".gomp_critical_user_",
6164 IDENTIFIER_POINTER (name), NULL));
6165 DECL_NAME (decl) = get_identifier (new_str);
6166 TREE_PUBLIC (decl) = 1;
6167 TREE_STATIC (decl) = 1;
6168 DECL_COMMON (decl) = 1;
6169 DECL_ARTIFICIAL (decl) = 1;
6170 DECL_IGNORED_P (decl) = 1;
6171 varpool_finalize_decl (decl);
6172
6173 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
6174 (splay_tree_value) decl);
6175 }
6176 else
6177 decl = (tree) n->value;
6178
6179 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
6180 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
6181
6182 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
6183 unlock = build_call_expr_loc (loc, unlock, 1,
6184 build_fold_addr_expr_loc (loc, decl));
6185 }
6186 else
6187 {
6188 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
6189 lock = build_call_expr_loc (loc, lock, 0);
6190
6191 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
6192 unlock = build_call_expr_loc (loc, unlock, 0);
6193 }
6194
6195 push_gimplify_context (&gctx);
6196
6197 block = make_node (BLOCK);
6198 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
6199
6200 tbody = gimple_bind_body (bind);
6201 gimplify_and_add (lock, &tbody);
6202 gimple_bind_set_body (bind, tbody);
6203
6204 lower_omp (gimple_omp_body (stmt), ctx);
6205 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6206 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6207 gimple_omp_set_body (stmt, NULL);
6208
6209 tbody = gimple_bind_body (bind);
6210 gimplify_and_add (unlock, &tbody);
6211 gimple_bind_set_body (bind, tbody);
6212
6213 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6214
6215 pop_gimplify_context (bind);
6216 gimple_bind_append_vars (bind, ctx->block_vars);
6217 BLOCK_VARS (block) = gimple_bind_vars (bind);
6218 gsi_replace (gsi_p, bind, true);
6219 }
6220
6221
6222 /* A subroutine of lower_omp_for. Generate code to emit the predicate
6223 for a lastprivate clause. Given a loop control predicate of (V
6224 cond N2), we gate the clause on (!(V cond N2)). The lowered form
6225    is appended to *DLIST; iterator initialization is appended to
6226 *BODY_P. */
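
/* Editorial example: for "for (V = N1; V < N2; V += STEP)" the lastprivate
   copy-out code is emitted under "if (V >= N2)"; when STEP is 1 or -1 this
   is strengthened to "if (V == N2)" below, since the final value of V is
   then known exactly and the equality lets VRP substitute it.  */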
6227
6228 static void
6229 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6230 gimple_seq *dlist, struct omp_context *ctx)
6231 {
6232 tree clauses, cond, vinit;
6233 enum tree_code cond_code;
6234 gimple_seq stmts;
6235
6236 cond_code = fd->loop.cond_code;
6237 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6238
6239 /* When possible, use a strict equality expression. This can let VRP
6240 type optimizations deduce the value and remove a copy. */
6241 if (host_integerp (fd->loop.step, 0))
6242 {
6243 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6244 if (step == 1 || step == -1)
6245 cond_code = EQ_EXPR;
6246 }
6247
6248 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6249
6250 clauses = gimple_omp_for_clauses (fd->for_stmt);
6251 stmts = NULL;
6252 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6253 if (!gimple_seq_empty_p (stmts))
6254 {
6255 gimple_seq_add_seq (&stmts, *dlist);
6256 *dlist = stmts;
6257
6258 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6259 vinit = fd->loop.n1;
6260 if (cond_code == EQ_EXPR
6261 && host_integerp (fd->loop.n2, 0)
6262 && ! integer_zerop (fd->loop.n2))
6263 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6264
6265 /* Initialize the iterator variable, so that threads that don't execute
6266 any iterations don't execute the lastprivate clauses by accident. */
6267 gimplify_assign (fd->loop.v, vinit, body_p);
6268 }
6269 }
6270
6271
6272 /* Lower code for an OpenMP loop directive. */
6273
6274 static void
6275 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6276 {
6277 tree *rhs_p, block;
6278 struct omp_for_data fd;
6279 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6280 gimple_seq omp_for_body, body, dlist;
6281 size_t i;
6282 struct gimplify_ctx gctx;
6283
6284 push_gimplify_context (&gctx);
6285
6286 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6287 lower_omp (gimple_omp_body (stmt), ctx);
6288
6289 block = make_node (BLOCK);
6290 new_stmt = gimple_build_bind (NULL, NULL, block);
6291
6292   /* Move the declarations of temporaries in the loop body to the new bind
6293      before we make the body go away.  */
6294 omp_for_body = gimple_omp_body (stmt);
6295 if (!gimple_seq_empty_p (omp_for_body)
6296 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6297 {
6298 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6299 gimple_bind_append_vars (new_stmt, vars);
6300 }
6301
6302 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6303 dlist = NULL;
6304 body = NULL;
6305 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6306 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6307
6308 /* Lower the header expressions. At this point, we can assume that
6309 the header is of the form:
6310
6311 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6312
6313 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6314 using the .omp_data_s mapping, if needed. */
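  /* Editorial example: if VAL2 refers to a variable remapped by the data
     sharing clauses, it is no longer a simple invariant; the
     get_formal_tmp_var calls below pull such operands into temporaries
     computed in BODY so that the GIMPLE_OMP_FOR header itself keeps only
     simple operands.  */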
6315 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6316 {
6317 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6318 if (!is_gimple_min_invariant (*rhs_p))
6319 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6320
6321 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6322 if (!is_gimple_min_invariant (*rhs_p))
6323 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6324
6325 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6326 if (!is_gimple_min_invariant (*rhs_p))
6327 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6328 }
6329
6330 /* Once lowered, extract the bounds and clauses. */
6331 extract_omp_for_data (stmt, &fd, NULL);
6332
6333 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6334
6335 gimple_seq_add_stmt (&body, stmt);
6336 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6337
6338 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6339 fd.loop.v));
6340
6341 /* After the loop, add exit clauses. */
6342 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6343 gimple_seq_add_seq (&body, dlist);
6344
6345 body = maybe_catch_exception (body);
6346
6347 /* Region exit marker goes at the end of the loop body. */
6348 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6349
6350 pop_gimplify_context (new_stmt);
6351
6352 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6353 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6354 if (BLOCK_VARS (block))
6355 TREE_USED (block) = 1;
6356
6357 gimple_bind_set_body (new_stmt, body);
6358 gimple_omp_set_body (stmt, NULL);
6359 gimple_omp_for_set_pre_body (stmt, NULL);
6360 gsi_replace (gsi_p, new_stmt, true);
6361 }
6362
6363 /* Callback for walk_stmts.  Check if the current statement only contains
6364    GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */
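
/* Editorial example: walking the body of

     #pragma omp parallel
     { #pragma omp for ... }

   leaves *INFO == 1 and lower_omp_taskreg marks the parallel as combined;
   any statement other than a single workshare forces *INFO to -1.  */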
6365
6366 static tree
6367 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6368 bool *handled_ops_p,
6369 struct walk_stmt_info *wi)
6370 {
6371 int *info = (int *) wi->info;
6372 gimple stmt = gsi_stmt (*gsi_p);
6373
6374 *handled_ops_p = true;
6375 switch (gimple_code (stmt))
6376 {
6377 WALK_SUBSTMTS;
6378
6379 case GIMPLE_OMP_FOR:
6380 case GIMPLE_OMP_SECTIONS:
6381 *info = *info == 0 ? 1 : -1;
6382 break;
6383 default:
6384 *info = -1;
6385 break;
6386 }
6387 return NULL;
6388 }
6389
6390 struct omp_taskcopy_context
6391 {
6392 /* This field must be at the beginning, as we do "inheritance": Some
6393 callback functions for tree-inline.c (e.g., omp_copy_decl)
6394 receive a copy_body_data pointer that is up-casted to an
6395 omp_context pointer. */
6396 copy_body_data cb;
6397 omp_context *ctx;
6398 };
6399
6400 static tree
6401 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6402 {
6403 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6404
6405 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6406 return create_tmp_var (TREE_TYPE (var), NULL);
6407
6408 return var;
6409 }
6410
6411 static tree
6412 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6413 {
6414 tree name, new_fields = NULL, type, f;
6415
6416 type = lang_hooks.types.make_type (RECORD_TYPE);
6417 name = DECL_NAME (TYPE_NAME (orig_type));
6418 name = build_decl (gimple_location (tcctx->ctx->stmt),
6419 TYPE_DECL, name, type);
6420 TYPE_NAME (type) = name;
6421
6422 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6423 {
6424 tree new_f = copy_node (f);
6425 DECL_CONTEXT (new_f) = type;
6426 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6427 TREE_CHAIN (new_f) = new_fields;
6428 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6429 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6430 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6431 &tcctx->cb, NULL);
6432 new_fields = new_f;
6433 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6434 }
6435 TYPE_FIELDS (type) = nreverse (new_fields);
6436 layout_type (type);
6437 return type;
6438 }
6439
6440 /* Create task copyfn. */
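
/* Editorial sketch (struct tags and parameter names illustrative only): the
   generated function has the shape

     void copyfn (struct data *dst, struct shared_data *src)
     {
       dst->shared_var_ptr = src->shared_var_ptr;     (OMP_CLAUSE_SHARED)
       dst->fp_var = copy-construct (src->fp_var);    (OMP_CLAUSE_FIRSTPRIVATE)
       ...
     }

   where SRC is the record filled in by the thread that encountered the task
   and DST is the record handed to the task body.  */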
6441
6442 static void
6443 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6444 {
6445 struct function *child_cfun;
6446 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6447 tree record_type, srecord_type, bind, list;
6448 bool record_needs_remap = false, srecord_needs_remap = false;
6449 splay_tree_node n;
6450 struct omp_taskcopy_context tcctx;
6451 struct gimplify_ctx gctx;
6452 location_t loc = gimple_location (task_stmt);
6453
6454 child_fn = gimple_omp_task_copy_fn (task_stmt);
6455 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6456 gcc_assert (child_cfun->cfg == NULL);
6457 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6458
6459 /* Reset DECL_CONTEXT on function arguments. */
6460 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
6461 DECL_CONTEXT (t) = child_fn;
6462
6463 /* Populate the function. */
6464 push_gimplify_context (&gctx);
6465 current_function_decl = child_fn;
6466
6467 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6468 TREE_SIDE_EFFECTS (bind) = 1;
6469 list = NULL;
6470 DECL_SAVED_TREE (child_fn) = bind;
6471 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6472
6473 /* Remap src and dst argument types if needed. */
6474 record_type = ctx->record_type;
6475 srecord_type = ctx->srecord_type;
6476 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6477 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6478 {
6479 record_needs_remap = true;
6480 break;
6481 }
6482 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
6483 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6484 {
6485 srecord_needs_remap = true;
6486 break;
6487 }
6488
6489 if (record_needs_remap || srecord_needs_remap)
6490 {
6491 memset (&tcctx, '\0', sizeof (tcctx));
6492 tcctx.cb.src_fn = ctx->cb.src_fn;
6493 tcctx.cb.dst_fn = child_fn;
6494 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
6495 gcc_checking_assert (tcctx.cb.src_node);
6496 tcctx.cb.dst_node = tcctx.cb.src_node;
6497 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6498 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6499 tcctx.cb.eh_lp_nr = 0;
6500 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6501 tcctx.cb.decl_map = pointer_map_create ();
6502 tcctx.ctx = ctx;
6503
6504 if (record_needs_remap)
6505 record_type = task_copyfn_remap_type (&tcctx, record_type);
6506 if (srecord_needs_remap)
6507 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6508 }
6509 else
6510 tcctx.cb.decl_map = NULL;
6511
6512 push_cfun (child_cfun);
6513
6514 arg = DECL_ARGUMENTS (child_fn);
6515 TREE_TYPE (arg) = build_pointer_type (record_type);
6516 sarg = DECL_CHAIN (arg);
6517 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6518
6519 /* First pass: initialize temporaries used in record_type and srecord_type
6520 sizes and field offsets. */
6521 if (tcctx.cb.decl_map)
6522 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6523 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6524 {
6525 tree *p;
6526
6527 decl = OMP_CLAUSE_DECL (c);
6528 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6529 if (p == NULL)
6530 continue;
6531 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6532 sf = (tree) n->value;
6533 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6534 src = build_simple_mem_ref_loc (loc, sarg);
6535 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6536 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6537 append_to_statement_list (t, &list);
6538 }
6539
6540 /* Second pass: copy shared var pointers and copy construct non-VLA
6541 firstprivate vars. */
6542 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6543 switch (OMP_CLAUSE_CODE (c))
6544 {
6545 case OMP_CLAUSE_SHARED:
6546 decl = OMP_CLAUSE_DECL (c);
6547 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6548 if (n == NULL)
6549 break;
6550 f = (tree) n->value;
6551 if (tcctx.cb.decl_map)
6552 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6553 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6554 sf = (tree) n->value;
6555 if (tcctx.cb.decl_map)
6556 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6557 src = build_simple_mem_ref_loc (loc, sarg);
6558 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6559 dst = build_simple_mem_ref_loc (loc, arg);
6560 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6561 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6562 append_to_statement_list (t, &list);
6563 break;
6564 case OMP_CLAUSE_FIRSTPRIVATE:
6565 decl = OMP_CLAUSE_DECL (c);
6566 if (is_variable_sized (decl))
6567 break;
6568 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6569 if (n == NULL)
6570 break;
6571 f = (tree) n->value;
6572 if (tcctx.cb.decl_map)
6573 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6574 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6575 if (n != NULL)
6576 {
6577 sf = (tree) n->value;
6578 if (tcctx.cb.decl_map)
6579 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6580 src = build_simple_mem_ref_loc (loc, sarg);
6581 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6582 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6583 src = build_simple_mem_ref_loc (loc, src);
6584 }
6585 else
6586 src = decl;
6587 dst = build_simple_mem_ref_loc (loc, arg);
6588 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6589 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6590 append_to_statement_list (t, &list);
6591 break;
6592 case OMP_CLAUSE_PRIVATE:
6593 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6594 break;
6595 decl = OMP_CLAUSE_DECL (c);
6596 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6597 f = (tree) n->value;
6598 if (tcctx.cb.decl_map)
6599 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6600 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6601 if (n != NULL)
6602 {
6603 sf = (tree) n->value;
6604 if (tcctx.cb.decl_map)
6605 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6606 src = build_simple_mem_ref_loc (loc, sarg);
6607 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6608 if (use_pointer_for_field (decl, NULL))
6609 src = build_simple_mem_ref_loc (loc, src);
6610 }
6611 else
6612 src = decl;
6613 dst = build_simple_mem_ref_loc (loc, arg);
6614 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6615 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6616 append_to_statement_list (t, &list);
6617 break;
6618 default:
6619 break;
6620 }
6621
6622 /* Last pass: handle VLA firstprivates. */
6623 if (tcctx.cb.decl_map)
6624 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6625 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6626 {
6627 tree ind, ptr, df;
6628
6629 decl = OMP_CLAUSE_DECL (c);
6630 if (!is_variable_sized (decl))
6631 continue;
6632 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6633 if (n == NULL)
6634 continue;
6635 f = (tree) n->value;
6636 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6637 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6638 ind = DECL_VALUE_EXPR (decl);
6639 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6640 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6641 n = splay_tree_lookup (ctx->sfield_map,
6642 (splay_tree_key) TREE_OPERAND (ind, 0));
6643 sf = (tree) n->value;
6644 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6645 src = build_simple_mem_ref_loc (loc, sarg);
6646 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6647 src = build_simple_mem_ref_loc (loc, src);
6648 dst = build_simple_mem_ref_loc (loc, arg);
6649 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6650 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6651 append_to_statement_list (t, &list);
6652 n = splay_tree_lookup (ctx->field_map,
6653 (splay_tree_key) TREE_OPERAND (ind, 0));
6654 df = (tree) n->value;
6655 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6656 ptr = build_simple_mem_ref_loc (loc, arg);
6657 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6658 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6659 build_fold_addr_expr_loc (loc, dst));
6660 append_to_statement_list (t, &list);
6661 }
6662
6663 t = build1 (RETURN_EXPR, void_type_node, NULL);
6664 append_to_statement_list (t, &list);
6665
6666 if (tcctx.cb.decl_map)
6667 pointer_map_destroy (tcctx.cb.decl_map);
6668 pop_gimplify_context (NULL);
6669 BIND_EXPR_BODY (bind) = list;
6670 pop_cfun ();
6671 current_function_decl = ctx->cb.src_fn;
6672 }
6673
6674 /* Lower the OpenMP parallel or task directive in the current statement
6675 in GSI_P. CTX holds context information for the directive. */
6676
6677 static void
6678 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6679 {
6680 tree clauses;
6681 tree child_fn, t;
6682 gimple stmt = gsi_stmt (*gsi_p);
6683 gimple par_bind, bind;
6684 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6685 struct gimplify_ctx gctx;
6686 location_t loc = gimple_location (stmt);
6687
6688 clauses = gimple_omp_taskreg_clauses (stmt);
6689 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6690 par_body = gimple_bind_body (par_bind);
6691 child_fn = ctx->cb.dst_fn;
6692 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6693 && !gimple_omp_parallel_combined_p (stmt))
6694 {
6695 struct walk_stmt_info wi;
6696 int ws_num = 0;
6697
6698 memset (&wi, 0, sizeof (wi));
6699 wi.info = &ws_num;
6700 wi.val_only = true;
6701 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6702 if (ws_num == 1)
6703 gimple_omp_parallel_set_combined_p (stmt, true);
6704 }
6705 if (ctx->srecord_type)
6706 create_task_copyfn (stmt, ctx);
6707
6708 push_gimplify_context (&gctx);
6709
6710 par_olist = NULL;
6711 par_ilist = NULL;
6712 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6713 lower_omp (par_body, ctx);
6714 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6715 lower_reduction_clauses (clauses, &par_olist, ctx);
6716
6717 /* Declare all the variables created by mapping and the variables
6718 declared in the scope of the parallel body. */
6719 record_vars_into (ctx->block_vars, child_fn);
6720 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6721
6722 if (ctx->record_type)
6723 {
6724 ctx->sender_decl
6725 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6726 : ctx->record_type, ".omp_data_o");
6727 DECL_NAMELESS (ctx->sender_decl) = 1;
6728 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6729 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6730 }
6731
6732 olist = NULL;
6733 ilist = NULL;
6734 lower_send_clauses (clauses, &ilist, &olist, ctx);
6735 lower_send_shared_vars (&ilist, &olist, ctx);
6736
6737 /* Once all the expansions are done, sequence all the different
6738 fragments inside gimple_omp_body. */
6739
6740 new_body = NULL;
6741
6742 if (ctx->record_type)
6743 {
6744 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6745 /* fixup_child_record_type might have changed receiver_decl's type. */
6746 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
6747 gimple_seq_add_stmt (&new_body,
6748 gimple_build_assign (ctx->receiver_decl, t));
6749 }
6750
6751 gimple_seq_add_seq (&new_body, par_ilist);
6752 gimple_seq_add_seq (&new_body, par_body);
6753 gimple_seq_add_seq (&new_body, par_olist);
6754 new_body = maybe_catch_exception (new_body);
6755 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6756 gimple_omp_set_body (stmt, new_body);
6757
6758 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6759 gimple_bind_add_stmt (bind, stmt);
6760 if (ilist || olist)
6761 {
6762 gimple_seq_add_stmt (&ilist, bind);
6763 gimple_seq_add_seq (&ilist, olist);
6764 bind = gimple_build_bind (NULL, ilist, NULL);
6765 }
6766
6767 gsi_replace (gsi_p, bind, true);
6768
6769 pop_gimplify_context (NULL);
6770 }
6771
6772 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6773    regimplified.  If DATA is non-NULL, lower_omp_1 is being called outside
6774    of any OpenMP context, but with task_shared_vars set.  */
6775
6776 static tree
6777 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6778 void *data)
6779 {
6780 tree t = *tp;
6781
6782 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6783 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6784 return t;
6785
6786 if (task_shared_vars
6787 && DECL_P (t)
6788 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6789 return t;
6790
6791 /* If a global variable has been privatized, TREE_CONSTANT on
6792 ADDR_EXPR might be wrong. */
6793 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6794 recompute_tree_invariant_for_addr_expr (t);
6795
6796 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6797 return NULL_TREE;
6798 }
6799
6800 static void
6801 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6802 {
6803 gimple stmt = gsi_stmt (*gsi_p);
6804 struct walk_stmt_info wi;
6805
6806 if (gimple_has_location (stmt))
6807 input_location = gimple_location (stmt);
6808
6809 if (task_shared_vars)
6810 memset (&wi, '\0', sizeof (wi));
6811
6812 /* If we have issued syntax errors, avoid doing any heavy lifting.
6813 Just replace the OpenMP directives with a NOP to avoid
6814 confusing RTL expansion. */
6815 if (seen_error () && is_gimple_omp (stmt))
6816 {
6817 gsi_replace (gsi_p, gimple_build_nop (), true);
6818 return;
6819 }
6820
6821 switch (gimple_code (stmt))
6822 {
6823 case GIMPLE_COND:
6824 if ((ctx || task_shared_vars)
6825 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6826 ctx ? NULL : &wi, NULL)
6827 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6828 ctx ? NULL : &wi, NULL)))
6829 gimple_regimplify_operands (stmt, gsi_p);
6830 break;
6831 case GIMPLE_CATCH:
6832 lower_omp (gimple_catch_handler (stmt), ctx);
6833 break;
6834 case GIMPLE_EH_FILTER:
6835 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6836 break;
6837 case GIMPLE_TRY:
6838 lower_omp (gimple_try_eval (stmt), ctx);
6839 lower_omp (gimple_try_cleanup (stmt), ctx);
6840 break;
6841 case GIMPLE_BIND:
6842 lower_omp (gimple_bind_body (stmt), ctx);
6843 break;
6844 case GIMPLE_OMP_PARALLEL:
6845 case GIMPLE_OMP_TASK:
6846 ctx = maybe_lookup_ctx (stmt);
6847 lower_omp_taskreg (gsi_p, ctx);
6848 break;
6849 case GIMPLE_OMP_FOR:
6850 ctx = maybe_lookup_ctx (stmt);
6851 gcc_assert (ctx);
6852 lower_omp_for (gsi_p, ctx);
6853 break;
6854 case GIMPLE_OMP_SECTIONS:
6855 ctx = maybe_lookup_ctx (stmt);
6856 gcc_assert (ctx);
6857 lower_omp_sections (gsi_p, ctx);
6858 break;
6859 case GIMPLE_OMP_SINGLE:
6860 ctx = maybe_lookup_ctx (stmt);
6861 gcc_assert (ctx);
6862 lower_omp_single (gsi_p, ctx);
6863 break;
6864 case GIMPLE_OMP_MASTER:
6865 ctx = maybe_lookup_ctx (stmt);
6866 gcc_assert (ctx);
6867 lower_omp_master (gsi_p, ctx);
6868 break;
6869 case GIMPLE_OMP_ORDERED:
6870 ctx = maybe_lookup_ctx (stmt);
6871 gcc_assert (ctx);
6872 lower_omp_ordered (gsi_p, ctx);
6873 break;
6874 case GIMPLE_OMP_CRITICAL:
6875 ctx = maybe_lookup_ctx (stmt);
6876 gcc_assert (ctx);
6877 lower_omp_critical (gsi_p, ctx);
6878 break;
6879 case GIMPLE_OMP_ATOMIC_LOAD:
6880 if ((ctx || task_shared_vars)
6881 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6882 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6883 gimple_regimplify_operands (stmt, gsi_p);
6884 break;
6885 default:
6886 if ((ctx || task_shared_vars)
6887 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6888 ctx ? NULL : &wi))
6889 gimple_regimplify_operands (stmt, gsi_p);
6890 break;
6891 }
6892 }
6893
6894 static void
6895 lower_omp (gimple_seq body, omp_context *ctx)
6896 {
6897 location_t saved_location = input_location;
6898 gimple_stmt_iterator gsi = gsi_start (body);
6899 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6900 lower_omp_1 (&gsi, ctx);
6901 input_location = saved_location;
6902 }
6903 \f
6904 /* Main entry point. */
6905
6906 static unsigned int
6907 execute_lower_omp (void)
6908 {
6909 gimple_seq body;
6910
6911 /* This pass always runs, to provide PROP_gimple_lomp.
6912 But there is nothing to do unless -fopenmp is given. */
6913 if (flag_openmp == 0)
6914 return 0;
6915
6916 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6917 delete_omp_context);
6918
6919 body = gimple_body (current_function_decl);
6920 scan_omp (body, NULL);
6921 gcc_assert (taskreg_nesting_level == 0);
6922
6923 if (all_contexts->root)
6924 {
6925 struct gimplify_ctx gctx;
6926
6927 if (task_shared_vars)
6928 push_gimplify_context (&gctx);
6929 lower_omp (body, NULL);
6930 if (task_shared_vars)
6931 pop_gimplify_context (NULL);
6932 }
6933
6934 if (all_contexts)
6935 {
6936 splay_tree_delete (all_contexts);
6937 all_contexts = NULL;
6938 }
6939 BITMAP_FREE (task_shared_vars);
6940 return 0;
6941 }
6942
6943 struct gimple_opt_pass pass_lower_omp =
6944 {
6945 {
6946 GIMPLE_PASS,
6947 "omplower", /* name */
6948 NULL, /* gate */
6949 execute_lower_omp, /* execute */
6950 NULL, /* sub */
6951 NULL, /* next */
6952 0, /* static_pass_number */
6953 TV_NONE, /* tv_id */
6954 PROP_gimple_any, /* properties_required */
6955 PROP_gimple_lomp, /* properties_provided */
6956 0, /* properties_destroyed */
6957 0, /* todo_flags_start */
6958 0 /* todo_flags_finish */
6959 }
6960 };
6961 \f
6962 /* The following is a utility to diagnose OpenMP structured block violations.
6963 It is not part of the "omplower" pass, as that's invoked too late. It
6964 should be invoked by the respective front ends after gimplification. */
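
/* Editorial example of the kind of code these passes reject:

     #pragma omp parallel
     {
       if (cond)
	 goto out;
       ...
     }
   out:;

   diagnose_sb_1 records that "out" is defined outside the parallel,
   diagnose_sb_2 sees the branch inside it, and diagnose_sb_0 reports
   "invalid branch to/from an OpenMP structured block".  */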
6965
6966 static splay_tree all_labels;
6967
6968 /* Check for mismatched contexts and generate an error if needed. Return
6969 true if an error is detected. */
6970
6971 static bool
6972 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6973 gimple branch_ctx, gimple label_ctx)
6974 {
6975 if (label_ctx == branch_ctx)
6976 return false;
6977
6978
6979 /*
6980 Previously we kept track of the label's entire context in diagnose_sb_[12]
6981 so we could traverse it and issue a correct "exit" or "enter" error
6982 message upon a structured block violation.
6983
6984 We built the context by building a list with tree_cons'ing, but there is
6985 no easy counterpart in gimple tuples. It seems like far too much work
6986 for issuing exit/enter error messages. If someone really misses the
6987 distinct error message... patches welcome.
6988 */
6989
6990 #if 0
6991   /* Try to avoid confusing the user by producing an error message
6992 with correct "exit" or "enter" verbiage. We prefer "exit"
6993 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6994 if (branch_ctx == NULL)
6995 exit_p = false;
6996 else
6997 {
6998 while (label_ctx)
6999 {
7000 if (TREE_VALUE (label_ctx) == branch_ctx)
7001 {
7002 exit_p = false;
7003 break;
7004 }
7005 label_ctx = TREE_CHAIN (label_ctx);
7006 }
7007 }
7008
7009 if (exit_p)
7010 error ("invalid exit from OpenMP structured block");
7011 else
7012 error ("invalid entry to OpenMP structured block");
7013 #endif
7014
7015 /* If it's obvious we have an invalid entry, be specific about the error. */
7016 if (branch_ctx == NULL)
7017 error ("invalid entry to OpenMP structured block");
7018 else
7019 /* Otherwise, be vague and lazy, but efficient. */
7020 error ("invalid branch to/from an OpenMP structured block");
7021
7022 gsi_replace (gsi_p, gimple_build_nop (), false);
7023 return true;
7024 }
7025
7026 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
7027 where each label is found. */
7028
7029 static tree
7030 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7031 struct walk_stmt_info *wi)
7032 {
7033 gimple context = (gimple) wi->info;
7034 gimple inner_context;
7035 gimple stmt = gsi_stmt (*gsi_p);
7036
7037 *handled_ops_p = true;
7038
7039 switch (gimple_code (stmt))
7040 {
7041 WALK_SUBSTMTS;
7042
7043 case GIMPLE_OMP_PARALLEL:
7044 case GIMPLE_OMP_TASK:
7045 case GIMPLE_OMP_SECTIONS:
7046 case GIMPLE_OMP_SINGLE:
7047 case GIMPLE_OMP_SECTION:
7048 case GIMPLE_OMP_MASTER:
7049 case GIMPLE_OMP_ORDERED:
7050 case GIMPLE_OMP_CRITICAL:
7051 /* The minimal context here is just the current OMP construct. */
7052 inner_context = stmt;
7053 wi->info = inner_context;
7054 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7055 wi->info = context;
7056 break;
7057
7058 case GIMPLE_OMP_FOR:
7059 inner_context = stmt;
7060 wi->info = inner_context;
7061 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7062 walk them. */
7063 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
7064 diagnose_sb_1, NULL, wi);
7065 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7066 wi->info = context;
7067 break;
7068
7069 case GIMPLE_LABEL:
7070 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
7071 (splay_tree_value) context);
7072 break;
7073
7074 default:
7075 break;
7076 }
7077
7078 return NULL_TREE;
7079 }
7080
7081 /* Pass 2: Check each branch and see if its context differs from that of
7082 the destination label's context. */
7083
7084 static tree
7085 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7086 struct walk_stmt_info *wi)
7087 {
7088 gimple context = (gimple) wi->info;
7089 splay_tree_node n;
7090 gimple stmt = gsi_stmt (*gsi_p);
7091
7092 *handled_ops_p = true;
7093
7094 switch (gimple_code (stmt))
7095 {
7096 WALK_SUBSTMTS;
7097
7098 case GIMPLE_OMP_PARALLEL:
7099 case GIMPLE_OMP_TASK:
7100 case GIMPLE_OMP_SECTIONS:
7101 case GIMPLE_OMP_SINGLE:
7102 case GIMPLE_OMP_SECTION:
7103 case GIMPLE_OMP_MASTER:
7104 case GIMPLE_OMP_ORDERED:
7105 case GIMPLE_OMP_CRITICAL:
7106 wi->info = stmt;
7107 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
7108 wi->info = context;
7109 break;
7110
7111 case GIMPLE_OMP_FOR:
7112 wi->info = stmt;
7113 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7114 walk them. */
7115 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
7116 diagnose_sb_2, NULL, wi);
7117 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
7118 wi->info = context;
7119 break;
7120
7121 case GIMPLE_COND:
7122 {
7123 tree lab = gimple_cond_true_label (stmt);
7124 if (lab)
7125 {
7126 n = splay_tree_lookup (all_labels,
7127 (splay_tree_key) lab);
7128 diagnose_sb_0 (gsi_p, context,
7129 n ? (gimple) n->value : NULL);
7130 }
7131 lab = gimple_cond_false_label (stmt);
7132 if (lab)
7133 {
7134 n = splay_tree_lookup (all_labels,
7135 (splay_tree_key) lab);
7136 diagnose_sb_0 (gsi_p, context,
7137 n ? (gimple) n->value : NULL);
7138 }
7139 }
7140 break;
7141
7142 case GIMPLE_GOTO:
7143 {
7144 tree lab = gimple_goto_dest (stmt);
7145 if (TREE_CODE (lab) != LABEL_DECL)
7146 break;
7147
7148 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7149 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
7150 }
7151 break;
7152
7153 case GIMPLE_SWITCH:
7154 {
7155 unsigned int i;
7156 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
7157 {
7158 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
7159 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7160 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
7161 break;
7162 }
7163 }
7164 break;
7165
7166 case GIMPLE_RETURN:
7167 diagnose_sb_0 (gsi_p, context, NULL);
7168 break;
7169
7170 default:
7171 break;
7172 }
7173
7174 return NULL_TREE;
7175 }
7176
7177 static unsigned int
7178 diagnose_omp_structured_block_errors (void)
7179 {
7180 struct walk_stmt_info wi;
7181 gimple_seq body = gimple_body (current_function_decl);
7182
7183 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
7184
7185 memset (&wi, 0, sizeof (wi));
7186 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
7187
7188 memset (&wi, 0, sizeof (wi));
7189 wi.want_locations = true;
7190 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
7191
7192 splay_tree_delete (all_labels);
7193 all_labels = NULL;
7194
7195 return 0;
7196 }
7197
7198 static bool
7199 gate_diagnose_omp_blocks (void)
7200 {
7201 return flag_openmp != 0;
7202 }
7203
7204 struct gimple_opt_pass pass_diagnose_omp_blocks =
7205 {
7206 {
7207 GIMPLE_PASS,
7208 "*diagnose_omp_blocks", /* name */
7209 gate_diagnose_omp_blocks, /* gate */
7210 diagnose_omp_structured_block_errors, /* execute */
7211 NULL, /* sub */
7212 NULL, /* next */
7213 0, /* static_pass_number */
7214 TV_NONE, /* tv_id */
7215 PROP_gimple_any, /* properties_required */
7216 0, /* properties_provided */
7217 0, /* properties_destroyed */
7218 0, /* todo_flags_start */
7219 0, /* todo_flags_finish */
7220 }
7221 };
7222
7223 #include "gt-omp-low.h"