1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
7 Free Software Foundation, Inc.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "gimple.h"
32 #include "tree-iterator.h"
33 #include "tree-inline.h"
34 #include "langhooks.h"
35 #include "diagnostic-core.h"
36 #include "tree-flow.h"
37 #include "timevar.h"
38 #include "flags.h"
39 #include "function.h"
40 #include "expr.h"
41 #include "toplev.h"
42 #include "tree-pass.h"
43 #include "ggc.h"
44 #include "except.h"
45 #include "splay-tree.h"
46 #include "optabs.h"
47 #include "cfgloop.h"
48
49
50 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
51 phases. The first phase scans the function looking for OMP statements
52 and then for variables that must be replaced to satisfy data sharing
53 clauses. The second phase expands code for the constructs, as well as
54 re-gimplifying things when variables have been replaced with complex
55 expressions.
56
57 Final code generation is done by pass_expand_omp. The flowgraph is
58 scanned for parallel regions which are then moved to a new
59 function, to be invoked by the thread library. */
60
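/* A minimal sketch (not part of this pass) of the expansion described
   above, assuming the libgomp entry points GOMP_parallel_start and
   GOMP_parallel_end; the struct and function names below are illustrative
   stand-ins for the generated ".omp_data_s" record and child function.  */

struct example_omp_data_s { int *a; int n; };

extern void GOMP_parallel_start (void (*) (void *), void *, unsigned);
extern void GOMP_parallel_end (void);

/* Outlined body of "#pragma omp parallel", run by every thread.  */
static void
example_omp_fn_0 (void *data)
{
  struct example_omp_data_s *d = (struct example_omp_data_s *) data;
  int i;
  for (i = 0; i < d->n; i++)
    d->a[i] = i;
}

/* Caller: marshal the shared data, start the team, run the body in the
   encountering thread as well, then join.  */
void
example_parallel (int *a, int n)
{
  struct example_omp_data_s o;
  o.a = a;
  o.n = n;
  GOMP_parallel_start (example_omp_fn_0, &o, 0);  /* 0 = default team size */
  example_omp_fn_0 (&o);
  GOMP_parallel_end ();
}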
61 /* Context structure. Used to store information about each parallel
62 directive in the code. */
63
64 typedef struct omp_context
65 {
66 /* This field must be at the beginning, as we do "inheritance": Some
67 callback functions for tree-inline.c (e.g., omp_copy_decl)
68 receive a copy_body_data pointer that is up-casted to an
69 omp_context pointer. */
70 copy_body_data cb;
71
72 /* The tree of contexts corresponding to the encountered constructs. */
73 struct omp_context *outer;
74 gimple stmt;
75
76 /* Map variables to fields in a structure that allows communication
77 between sending and receiving threads. */
78 splay_tree field_map;
79 tree record_type;
80 tree sender_decl;
81 tree receiver_decl;
82
83 /* These are used just by task contexts, if task firstprivate fn is
84 needed. srecord_type is used to communicate from the thread
85 that encountered the task construct to the task firstprivate fn;
86 record_type is allocated by GOMP_task, initialized by the task
87 firstprivate fn and passed to the task body fn. */
88 splay_tree sfield_map;
89 tree srecord_type;
90
91 /* A chain of variables to add to the top-level block surrounding the
92 construct. In the case of a parallel, this is in the child function. */
93 tree block_vars;
94
95 /* What to do with variables with implicitly determined sharing
96 attributes. */
97 enum omp_clause_default_kind default_kind;
98
99 /* Nesting depth of this context. Used to beautify error messages
100 regarding invalid gotos. The outermost ctx is depth 1, with depth 0 being
101 reserved for the main body of the function. */
102 int depth;
103
104 /* True if this parallel directive is nested within another. */
105 bool is_nested;
106 } omp_context;
107
108
109 struct omp_for_data_loop
110 {
111 tree v, n1, n2, step;
112 enum tree_code cond_code;
113 };
114
115 /* A structure describing the main elements of a parallel loop. */
116
117 struct omp_for_data
118 {
119 struct omp_for_data_loop loop;
120 tree chunk_size;
121 gimple for_stmt;
122 tree pre, iter_type;
123 int collapse;
124 bool have_nowait, have_ordered;
125 enum omp_clause_schedule_kind sched_kind;
126 struct omp_for_data_loop *loops;
127 };
128
129
130 static splay_tree all_contexts;
131 static int taskreg_nesting_level;
132 struct omp_region *root_omp_region;
133 static bitmap task_shared_vars;
134
135 static void scan_omp (gimple_seq, omp_context *);
136 static tree scan_omp_1_op (tree *, int *, void *);
137
138 #define WALK_SUBSTMTS \
139 case GIMPLE_BIND: \
140 case GIMPLE_TRY: \
141 case GIMPLE_CATCH: \
142 case GIMPLE_EH_FILTER: \
143 /* The sub-statements for these should be walked. */ \
144 *handled_ops_p = false; \
145 break;
146
147 /* Convenience function for calling scan_omp_1_op on tree operands. */
148
149 static inline tree
150 scan_omp_op (tree *tp, omp_context *ctx)
151 {
152 struct walk_stmt_info wi;
153
154 memset (&wi, 0, sizeof (wi));
155 wi.info = ctx;
156 wi.want_locations = true;
157
158 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
159 }
160
161 static void lower_omp (gimple_seq, omp_context *);
162 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
163 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
164
165 /* Find an OpenMP clause of type KIND within CLAUSES. */
166
167 tree
168 find_omp_clause (tree clauses, enum omp_clause_code kind)
169 {
170 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
171 if (OMP_CLAUSE_CODE (clauses) == kind)
172 return clauses;
173
174 return NULL_TREE;
175 }
176
177 /* Return true if CTX is for an omp parallel. */
178
179 static inline bool
180 is_parallel_ctx (omp_context *ctx)
181 {
182 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
183 }
184
185
186 /* Return true if CTX is for an omp task. */
187
188 static inline bool
189 is_task_ctx (omp_context *ctx)
190 {
191 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
192 }
193
194
195 /* Return true if CTX is for an omp parallel or omp task. */
196
197 static inline bool
198 is_taskreg_ctx (omp_context *ctx)
199 {
200 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
201 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
202 }
203
204
205 /* Return true if REGION is a combined parallel+workshare region. */
206
207 static inline bool
208 is_combined_parallel (struct omp_region *region)
209 {
210 return region->is_combined_parallel;
211 }
212
213
214 /* Extract the header elements of parallel loop FOR_STMT and store
215 them into *FD. */
216
217 static void
218 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
219 struct omp_for_data_loop *loops)
220 {
221 tree t, var, *collapse_iter, *collapse_count;
222 tree count = NULL_TREE, iter_type = long_integer_type_node;
223 struct omp_for_data_loop *loop;
224 int i;
225 struct omp_for_data_loop dummy_loop;
226 location_t loc = gimple_location (for_stmt);
227
228 fd->for_stmt = for_stmt;
229 fd->pre = NULL;
230 fd->collapse = gimple_omp_for_collapse (for_stmt);
231 if (fd->collapse > 1)
232 fd->loops = loops;
233 else
234 fd->loops = &fd->loop;
235
236 fd->have_nowait = fd->have_ordered = false;
237 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
238 fd->chunk_size = NULL_TREE;
239 collapse_iter = NULL;
240 collapse_count = NULL;
241
242 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
243 switch (OMP_CLAUSE_CODE (t))
244 {
245 case OMP_CLAUSE_NOWAIT:
246 fd->have_nowait = true;
247 break;
248 case OMP_CLAUSE_ORDERED:
249 fd->have_ordered = true;
250 break;
251 case OMP_CLAUSE_SCHEDULE:
252 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
253 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
254 break;
255 case OMP_CLAUSE_COLLAPSE:
256 if (fd->collapse > 1)
257 {
258 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
259 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
260 }
261 default:
262 break;
263 }
264
265 /* FIXME: for now map schedule(auto) to schedule(static).
266 There should be analysis to determine whether all iterations
267 are approximately the same amount of work (then schedule(static)
268 is best) or if it varies (then schedule(dynamic,N) is better). */
269 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
270 {
271 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
272 gcc_assert (fd->chunk_size == NULL);
273 }
274 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
275 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
276 gcc_assert (fd->chunk_size == NULL);
277 else if (fd->chunk_size == NULL)
278 {
279 /* We only need to compute a default chunk size for ordered
280 static loops and dynamic loops. */
281 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
282 || fd->have_ordered
283 || fd->collapse > 1)
284 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
285 ? integer_zero_node : integer_one_node;
286 }
287
288 for (i = 0; i < fd->collapse; i++)
289 {
290 if (fd->collapse == 1)
291 loop = &fd->loop;
292 else if (loops != NULL)
293 loop = loops + i;
294 else
295 loop = &dummy_loop;
296
297
298 loop->v = gimple_omp_for_index (for_stmt, i);
299 gcc_assert (SSA_VAR_P (loop->v));
300 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
301 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
302 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
303 loop->n1 = gimple_omp_for_initial (for_stmt, i);
304
305 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
306 loop->n2 = gimple_omp_for_final (for_stmt, i);
307 switch (loop->cond_code)
308 {
309 case LT_EXPR:
310 case GT_EXPR:
311 break;
312 case LE_EXPR:
313 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
314 loop->n2 = fold_build2_loc (loc,
315 POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
316 loop->n2, size_one_node);
317 else
318 loop->n2 = fold_build2_loc (loc,
319 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
320 build_int_cst (TREE_TYPE (loop->n2), 1));
321 loop->cond_code = LT_EXPR;
322 break;
323 case GE_EXPR:
324 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
325 loop->n2 = fold_build2_loc (loc,
326 POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
327 loop->n2, size_int (-1));
328 else
329 loop->n2 = fold_build2_loc (loc,
330 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
331 build_int_cst (TREE_TYPE (loop->n2), 1));
332 loop->cond_code = GT_EXPR;
333 break;
334 default:
335 gcc_unreachable ();
336 }
337
338 t = gimple_omp_for_incr (for_stmt, i);
339 gcc_assert (TREE_OPERAND (t, 0) == var);
340 switch (TREE_CODE (t))
341 {
342 case PLUS_EXPR:
343 case POINTER_PLUS_EXPR:
344 loop->step = TREE_OPERAND (t, 1);
345 break;
346 case MINUS_EXPR:
347 loop->step = TREE_OPERAND (t, 1);
348 loop->step = fold_build1_loc (loc,
349 NEGATE_EXPR, TREE_TYPE (loop->step),
350 loop->step);
351 break;
352 default:
353 gcc_unreachable ();
354 }
355
356 if (iter_type != long_long_unsigned_type_node)
357 {
358 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
359 iter_type = long_long_unsigned_type_node;
360 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
361 && TYPE_PRECISION (TREE_TYPE (loop->v))
362 >= TYPE_PRECISION (iter_type))
363 {
364 tree n;
365
366 if (loop->cond_code == LT_EXPR)
367 n = fold_build2_loc (loc,
368 PLUS_EXPR, TREE_TYPE (loop->v),
369 loop->n2, loop->step);
370 else
371 n = loop->n1;
372 if (TREE_CODE (n) != INTEGER_CST
373 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
374 iter_type = long_long_unsigned_type_node;
375 }
376 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
377 > TYPE_PRECISION (iter_type))
378 {
379 tree n1, n2;
380
381 if (loop->cond_code == LT_EXPR)
382 {
383 n1 = loop->n1;
384 n2 = fold_build2_loc (loc,
385 PLUS_EXPR, TREE_TYPE (loop->v),
386 loop->n2, loop->step);
387 }
388 else
389 {
390 n1 = fold_build2_loc (loc,
391 MINUS_EXPR, TREE_TYPE (loop->v),
392 loop->n2, loop->step);
393 n2 = loop->n1;
394 }
395 if (TREE_CODE (n1) != INTEGER_CST
396 || TREE_CODE (n2) != INTEGER_CST
397 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
398 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
399 iter_type = long_long_unsigned_type_node;
400 }
401 }
402
403 if (collapse_count && *collapse_count == NULL)
404 {
405 if ((i == 0 || count != NULL_TREE)
406 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
407 && TREE_CONSTANT (loop->n1)
408 && TREE_CONSTANT (loop->n2)
409 && TREE_CODE (loop->step) == INTEGER_CST)
410 {
411 tree itype = TREE_TYPE (loop->v);
412
413 if (POINTER_TYPE_P (itype))
414 itype
415 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
416 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
417 t = fold_build2_loc (loc,
418 PLUS_EXPR, itype,
419 fold_convert_loc (loc, itype, loop->step), t);
420 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
421 fold_convert_loc (loc, itype, loop->n2));
422 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
423 fold_convert_loc (loc, itype, loop->n1));
424 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
425 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
426 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
427 fold_build1_loc (loc, NEGATE_EXPR, itype,
428 fold_convert_loc (loc, itype,
429 loop->step)));
430 else
431 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
432 fold_convert_loc (loc, itype, loop->step));
433 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
434 if (count != NULL_TREE)
435 count = fold_build2_loc (loc,
436 MULT_EXPR, long_long_unsigned_type_node,
437 count, t);
438 else
439 count = t;
440 if (TREE_CODE (count) != INTEGER_CST)
441 count = NULL_TREE;
442 }
443 else
444 count = NULL_TREE;
445 }
446 }
447
448 if (count)
449 {
450 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
451 iter_type = long_long_unsigned_type_node;
452 else
453 iter_type = long_integer_type_node;
454 }
455 else if (collapse_iter && *collapse_iter != NULL)
456 iter_type = TREE_TYPE (*collapse_iter);
457 fd->iter_type = iter_type;
458 if (collapse_iter && *collapse_iter == NULL)
459 *collapse_iter = create_tmp_var (iter_type, ".iter");
460 if (collapse_count && *collapse_count == NULL)
461 {
462 if (count)
463 *collapse_count = fold_convert_loc (loc, iter_type, count);
464 else
465 *collapse_count = create_tmp_var (iter_type, ".count");
466 }
467
468 if (fd->collapse > 1)
469 {
470 fd->loop.v = *collapse_iter;
471 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
472 fd->loop.n2 = *collapse_count;
473 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
474 fd->loop.cond_code = LT_EXPR;
475 }
476 }
477
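/* A minimal sketch (not part of this file) of the trip-count expression the
   folding above builds for one normalized loop: after LE/GE have been
   rewritten to LT/GT, count = (n2 - n1 + step + (LT ? -1 : 1)) / step with
   truncating division (TRUNC_DIV_EXPR).  The function name is illustrative
   only.  */
static long
example_omp_trip_count (long n1, long n2, long step, int cond_is_lt)
{
  long t = step + (cond_is_lt ? -1 : 1);  /* step - 1 for <, step + 1 for >  */
  t = t + n2;
  t = t - n1;
  return t / step;  /* e.g. n1=0, n2=10, step=3, < : (10 + 2) / 3 = 4 iterations  */
}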
478
479 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
480 is the immediate dominator of PAR_ENTRY_BB, return true if there
481 are no data dependencies that would prevent expanding the parallel
482 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
483
484 When expanding a combined parallel+workshare region, the call to
485 the child function may need additional arguments in the case of
486 GIMPLE_OMP_FOR regions. In some cases, these arguments are
487 computed out of variables passed in from the parent to the child
488 via 'struct .omp_data_s'. For instance:
489
490 #pragma omp parallel for schedule (guided, i * 4)
491 for (j ...)
492
493 Is lowered into:
494
495 # BLOCK 2 (PAR_ENTRY_BB)
496 .omp_data_o.i = i;
497 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
498
499 # BLOCK 3 (WS_ENTRY_BB)
500 .omp_data_i = &.omp_data_o;
501 D.1667 = .omp_data_i->i;
502 D.1598 = D.1667 * 4;
503 #pragma omp for schedule (guided, D.1598)
504
505 When we outline the parallel region, the call to the child function
506 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
507 that value is computed *after* the call site. So, in principle we
508 cannot do the transformation.
509
510 To see whether the code in WS_ENTRY_BB blocks the combined
511 parallel+workshare call, we collect all the variables used in the
512 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
513 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
514 call.
515
516 FIXME. If we had the SSA form built at this point, we could merely
517 hoist the code in block 3 into block 2 and be done with it. But at
518 this point we don't have dataflow information and though we could
519 hack something up here, it is really not worth the aggravation. */
520
521 static bool
522 workshare_safe_to_combine_p (basic_block ws_entry_bb)
523 {
524 struct omp_for_data fd;
525 gimple ws_stmt = last_stmt (ws_entry_bb);
526
527 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
528 return true;
529
530 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
531
532 extract_omp_for_data (ws_stmt, &fd, NULL);
533
534 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
535 return false;
536 if (fd.iter_type != long_integer_type_node)
537 return false;
538
539 /* FIXME. We give up too easily here. If any of these arguments
540 are not constants, they will likely involve variables that have
541 been mapped into fields of .omp_data_s for sharing with the child
542 function. With appropriate data flow, it would be possible to
543 see through this. */
544 if (!is_gimple_min_invariant (fd.loop.n1)
545 || !is_gimple_min_invariant (fd.loop.n2)
546 || !is_gimple_min_invariant (fd.loop.step)
547 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
548 return false;
549
550 return true;
551 }
552
553
554 /* Collect additional arguments needed to emit a combined
555 parallel+workshare call. WS_STMT is the workshare directive being
556 expanded. */
557
558 static tree
559 get_ws_args_for (gimple ws_stmt)
560 {
561 tree t;
562 location_t loc = gimple_location (ws_stmt);
563
564 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
565 {
566 struct omp_for_data fd;
567 tree ws_args;
568
569 extract_omp_for_data (ws_stmt, &fd, NULL);
570
571 ws_args = NULL_TREE;
572 if (fd.chunk_size)
573 {
574 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
575 ws_args = tree_cons (NULL, t, ws_args);
576 }
577
578 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
579 ws_args = tree_cons (NULL, t, ws_args);
580
581 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
582 ws_args = tree_cons (NULL, t, ws_args);
583
584 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
585 ws_args = tree_cons (NULL, t, ws_args);
586
587 return ws_args;
588 }
589 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
590 {
591 /* Number of sections is equal to the number of edges from the
592 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
593 the exit of the sections region. */
594 basic_block bb = single_succ (gimple_bb (ws_stmt));
595 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
596 t = tree_cons (NULL, t, NULL);
597 return t;
598 }
599
600 gcc_unreachable ();
601 }
602
603
604 /* Discover whether REGION is a combined parallel+workshare region. */
605
606 static void
607 determine_parallel_type (struct omp_region *region)
608 {
609 basic_block par_entry_bb, par_exit_bb;
610 basic_block ws_entry_bb, ws_exit_bb;
611
612 if (region == NULL || region->inner == NULL
613 || region->exit == NULL || region->inner->exit == NULL
614 || region->inner->cont == NULL)
615 return;
616
617 /* We only support parallel+for and parallel+sections. */
618 if (region->type != GIMPLE_OMP_PARALLEL
619 || (region->inner->type != GIMPLE_OMP_FOR
620 && region->inner->type != GIMPLE_OMP_SECTIONS))
621 return;
622
623 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
624 WS_EXIT_BB -> PAR_EXIT_BB. */
625 par_entry_bb = region->entry;
626 par_exit_bb = region->exit;
627 ws_entry_bb = region->inner->entry;
628 ws_exit_bb = region->inner->exit;
629
630 if (single_succ (par_entry_bb) == ws_entry_bb
631 && single_succ (ws_exit_bb) == par_exit_bb
632 && workshare_safe_to_combine_p (ws_entry_bb)
633 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
634 || (last_and_only_stmt (ws_entry_bb)
635 && last_and_only_stmt (par_exit_bb))))
636 {
637 gimple ws_stmt = last_stmt (ws_entry_bb);
638
639 if (region->inner->type == GIMPLE_OMP_FOR)
640 {
641 /* If this is a combined parallel loop, we need to determine
642 whether or not to use the combined library calls. There
643 are two cases where we do not apply the transformation:
644 static loops and any kind of ordered loop. In the first
645 case, we already open code the loop so there is no need
646 to do anything else. In the latter case, the combined
647 parallel loop call would still need extra synchronization
648 to implement ordered semantics, so there would not be any
649 gain in using the combined call. */
650 tree clauses = gimple_omp_for_clauses (ws_stmt);
651 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
652 if (c == NULL
653 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
654 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
655 {
656 region->is_combined_parallel = false;
657 region->inner->is_combined_parallel = false;
658 return;
659 }
660 }
661
662 region->is_combined_parallel = true;
663 region->inner->is_combined_parallel = true;
664 region->ws_args = get_ws_args_for (ws_stmt);
665 }
666 }
667
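/* Illustrative inputs for the decision above; the function names are
   hypothetical.  The first loop is a candidate for the combined
   parallel+workshare entry points; the second is not, because static
   schedules are open coded; the third is not, because ordered would still
   need extra synchronization.  */
void
example_combinable (int *a, int n)
{
  int i;
#pragma omp parallel for schedule (dynamic, 4)
  for (i = 0; i < n; i++)
    a[i] = i;
}

void
example_not_combined_static (int *a, int n)
{
  int i;
#pragma omp parallel for schedule (static)
  for (i = 0; i < n; i++)
    a[i] = i;
}

void
example_not_combined_ordered (int *a, int n)
{
  int i;
#pragma omp parallel for ordered schedule (dynamic)
  for (i = 0; i < n; i++)
    {
#pragma omp ordered
      a[i] = i;
    }
}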
668
669 /* Return true if EXPR is variable sized. */
670
671 static inline bool
672 is_variable_sized (const_tree expr)
673 {
674 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
675 }
676
677 /* Return true if DECL is a reference type. */
678
679 static inline bool
680 is_reference (tree decl)
681 {
682 return lang_hooks.decls.omp_privatize_by_reference (decl);
683 }
684
685 /* Lookup variables in the decl or field splay trees. The "maybe" form
686 allows the variable not to have been entered; otherwise we
687 assert that the variable must have been entered. */
688
689 static inline tree
690 lookup_decl (tree var, omp_context *ctx)
691 {
692 tree *n;
693 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
694 return *n;
695 }
696
697 static inline tree
698 maybe_lookup_decl (const_tree var, omp_context *ctx)
699 {
700 tree *n;
701 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
702 return n ? *n : NULL_TREE;
703 }
704
705 static inline tree
706 lookup_field (tree var, omp_context *ctx)
707 {
708 splay_tree_node n;
709 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
710 return (tree) n->value;
711 }
712
713 static inline tree
714 lookup_sfield (tree var, omp_context *ctx)
715 {
716 splay_tree_node n;
717 n = splay_tree_lookup (ctx->sfield_map
718 ? ctx->sfield_map : ctx->field_map,
719 (splay_tree_key) var);
720 return (tree) n->value;
721 }
722
723 static inline tree
724 maybe_lookup_field (tree var, omp_context *ctx)
725 {
726 splay_tree_node n;
727 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
728 return n ? (tree) n->value : NULL_TREE;
729 }
730
731 /* Return true if DECL should be copied by pointer. SHARED_CTX is
732 the parallel context if DECL is to be shared. */
733
734 static bool
735 use_pointer_for_field (tree decl, omp_context *shared_ctx)
736 {
737 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
738 return true;
739
740 /* We can only use copy-in/copy-out semantics for shared variables
741 when we know the value is not accessible from an outer scope. */
742 if (shared_ctx)
743 {
744 /* ??? Trivially accessible from anywhere. But why would we even
745 be passing an address in this case? Should we simply assert
746 this to be false, or should we have a cleanup pass that removes
747 these from the list of mappings? */
748 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
749 return true;
750
751 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
752 without analyzing the expression whether or not its location
753 is accessible to anyone else. In the case of nested parallel
754 regions it certainly may be. */
755 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
756 return true;
757
758 /* Do not use copy-in/copy-out for variables that have their
759 address taken. */
760 if (TREE_ADDRESSABLE (decl))
761 return true;
762
763 /* Disallow copy-in/out in nested parallel if
764 decl is shared in outer parallel, otherwise
765 each thread could store the shared variable
766 in its own copy-in location, making the
767 variable no longer really shared. */
768 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
769 {
770 omp_context *up;
771
772 for (up = shared_ctx->outer; up; up = up->outer)
773 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
774 break;
775
776 if (up)
777 {
778 tree c;
779
780 for (c = gimple_omp_taskreg_clauses (up->stmt);
781 c; c = OMP_CLAUSE_CHAIN (c))
782 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
783 && OMP_CLAUSE_DECL (c) == decl)
784 break;
785
786 if (c)
787 return true;
788 }
789 }
790
791 /* For tasks, avoid using copy-in/out unless they are readonly
792 (in which case just copy-in is used). As tasks can be
793 deferred or executed in a different thread, when GOMP_task
794 returns, the task hasn't necessarily terminated. */
795 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
796 {
797 tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
798 if (is_gimple_reg (outer))
799 {
800 /* Taking address of OUTER in lower_send_shared_vars
801 might need regimplification of everything that uses the
802 variable. */
803 if (!task_shared_vars)
804 task_shared_vars = BITMAP_ALLOC (NULL);
805 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
806 TREE_ADDRESSABLE (outer) = 1;
807 }
808 return true;
809 }
810 }
811
812 return false;
813 }
814
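/* Illustrative sketch (field names hypothetical) of the two mappings the
   predicate above chooses between for variables shared with a child
   function: copy-in/copy-out of the value when that is safe, versus
   passing the address when it is not.  */
struct example_shared_data_s
{
  int x;	/* copy-in/copy-out: the sender stores x, the receiver uses the
		   copy, and the value is copied back after the region.  */
  int *y;	/* by reference: the sender stores &y, and all threads access
		   the original object through the pointer.  */
};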
815 /* Create a new VAR_DECL and copy information from VAR to it. */
816
817 tree
818 copy_var_decl (tree var, tree name, tree type)
819 {
820 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
821
822 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
823 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
824 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
825 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
826 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
827 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
828 TREE_USED (copy) = 1;
829 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
830
831 return copy;
832 }
833
834 /* Construct a new automatic decl similar to VAR. */
835
836 static tree
837 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
838 {
839 tree copy = copy_var_decl (var, name, type);
840
841 DECL_CONTEXT (copy) = current_function_decl;
842 TREE_CHAIN (copy) = ctx->block_vars;
843 ctx->block_vars = copy;
844
845 return copy;
846 }
847
848 static tree
849 omp_copy_decl_1 (tree var, omp_context *ctx)
850 {
851 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
852 }
853
854 /* Build tree nodes to access the field for VAR on the receiver side. */
855
856 static tree
857 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
858 {
859 tree x, field = lookup_field (var, ctx);
860
861 /* If the receiver record type was remapped in the child function,
862 remap the field into the new record type. */
863 x = maybe_lookup_field (field, ctx);
864 if (x != NULL)
865 field = x;
866
867 x = build_simple_mem_ref (ctx->receiver_decl);
868 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
869 if (by_ref)
870 x = build_simple_mem_ref (x);
871
872 return x;
873 }
874
875 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
876 of a parallel, this is a component reference; for workshare constructs
877 this is some variable. */
878
879 static tree
880 build_outer_var_ref (tree var, omp_context *ctx)
881 {
882 tree x;
883
884 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
885 x = var;
886 else if (is_variable_sized (var))
887 {
888 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
889 x = build_outer_var_ref (x, ctx);
890 x = build_simple_mem_ref (x);
891 }
892 else if (is_taskreg_ctx (ctx))
893 {
894 bool by_ref = use_pointer_for_field (var, NULL);
895 x = build_receiver_ref (var, by_ref, ctx);
896 }
897 else if (ctx->outer)
898 x = lookup_decl (var, ctx->outer);
899 else if (is_reference (var))
900 /* This can happen with orphaned constructs. If var is a reference, it is
901 possibly shared and as such valid. */
902 x = var;
903 else
904 gcc_unreachable ();
905
906 if (is_reference (var))
907 x = build_simple_mem_ref (x);
908
909 return x;
910 }
911
912 /* Build tree nodes to access the field for VAR on the sender side. */
913
914 static tree
915 build_sender_ref (tree var, omp_context *ctx)
916 {
917 tree field = lookup_sfield (var, ctx);
918 return build3 (COMPONENT_REF, TREE_TYPE (field),
919 ctx->sender_decl, field, NULL);
920 }
921
922 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
923
924 static void
925 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
926 {
927 tree field, type, sfield = NULL_TREE;
928
929 gcc_assert ((mask & 1) == 0
930 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
931 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
932 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
933
934 type = TREE_TYPE (var);
935 if (by_ref)
936 type = build_pointer_type (type);
937 else if ((mask & 3) == 1 && is_reference (var))
938 type = TREE_TYPE (type);
939
940 field = build_decl (DECL_SOURCE_LOCATION (var),
941 FIELD_DECL, DECL_NAME (var), type);
942
943 /* Remember what variable this field was created for. This does have a
944 side effect of making dwarf2out ignore this member, so for helpful
945 debugging we clear it later in delete_omp_context. */
946 DECL_ABSTRACT_ORIGIN (field) = var;
947 if (type == TREE_TYPE (var))
948 {
949 DECL_ALIGN (field) = DECL_ALIGN (var);
950 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
951 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
952 }
953 else
954 DECL_ALIGN (field) = TYPE_ALIGN (type);
955
956 if ((mask & 3) == 3)
957 {
958 insert_field_into_struct (ctx->record_type, field);
959 if (ctx->srecord_type)
960 {
961 sfield = build_decl (DECL_SOURCE_LOCATION (var),
962 FIELD_DECL, DECL_NAME (var), type);
963 DECL_ABSTRACT_ORIGIN (sfield) = var;
964 DECL_ALIGN (sfield) = DECL_ALIGN (field);
965 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
966 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
967 insert_field_into_struct (ctx->srecord_type, sfield);
968 }
969 }
970 else
971 {
972 if (ctx->srecord_type == NULL_TREE)
973 {
974 tree t;
975
976 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
977 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
978 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
979 {
980 sfield = build_decl (DECL_SOURCE_LOCATION (var),
981 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
982 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
983 insert_field_into_struct (ctx->srecord_type, sfield);
984 splay_tree_insert (ctx->sfield_map,
985 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
986 (splay_tree_value) sfield);
987 }
988 }
989 sfield = field;
990 insert_field_into_struct ((mask & 1) ? ctx->record_type
991 : ctx->srecord_type, field);
992 }
993
994 if (mask & 1)
995 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
996 (splay_tree_value) field);
997 if ((mask & 2) && ctx->sfield_map)
998 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
999 (splay_tree_value) sfield);
1000 }
1001
1002 static tree
1003 install_var_local (tree var, omp_context *ctx)
1004 {
1005 tree new_var = omp_copy_decl_1 (var, ctx);
1006 insert_decl_map (&ctx->cb, var, new_var);
1007 return new_var;
1008 }
1009
1010 /* Adjust the replacement for DECL in CTX for the new context. This means
1011 copying the DECL_VALUE_EXPR, and fixing up the type. */
1012
1013 static void
1014 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1015 {
1016 tree new_decl, size;
1017
1018 new_decl = lookup_decl (decl, ctx);
1019
1020 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1021
1022 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1023 && DECL_HAS_VALUE_EXPR_P (decl))
1024 {
1025 tree ve = DECL_VALUE_EXPR (decl);
1026 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1027 SET_DECL_VALUE_EXPR (new_decl, ve);
1028 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1029 }
1030
1031 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1032 {
1033 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1034 if (size == error_mark_node)
1035 size = TYPE_SIZE (TREE_TYPE (new_decl));
1036 DECL_SIZE (new_decl) = size;
1037
1038 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1039 if (size == error_mark_node)
1040 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1041 DECL_SIZE_UNIT (new_decl) = size;
1042 }
1043 }
1044
1045 /* The callback for remap_decl. Search all containing contexts for a
1046 mapping of the variable; this avoids having to duplicate the splay
1047 tree ahead of time. We know a mapping doesn't already exist in the
1048 given context. Create new mappings to implement default semantics. */
1049
1050 static tree
1051 omp_copy_decl (tree var, copy_body_data *cb)
1052 {
1053 omp_context *ctx = (omp_context *) cb;
1054 tree new_var;
1055
1056 if (TREE_CODE (var) == LABEL_DECL)
1057 {
1058 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1059 DECL_CONTEXT (new_var) = current_function_decl;
1060 insert_decl_map (&ctx->cb, var, new_var);
1061 return new_var;
1062 }
1063
1064 while (!is_taskreg_ctx (ctx))
1065 {
1066 ctx = ctx->outer;
1067 if (ctx == NULL)
1068 return var;
1069 new_var = maybe_lookup_decl (var, ctx);
1070 if (new_var)
1071 return new_var;
1072 }
1073
1074 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1075 return var;
1076
1077 return error_mark_node;
1078 }
1079
1080
1081 /* Return the parallel region associated with STMT. */
1082
1083 /* Debugging dumps for parallel regions. */
1084 void dump_omp_region (FILE *, struct omp_region *, int);
1085 void debug_omp_region (struct omp_region *);
1086 void debug_all_omp_regions (void);
1087
1088 /* Dump the parallel region tree rooted at REGION. */
1089
1090 void
1091 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1092 {
1093 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1094 gimple_code_name[region->type]);
1095
1096 if (region->inner)
1097 dump_omp_region (file, region->inner, indent + 4);
1098
1099 if (region->cont)
1100 {
1101 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1102 region->cont->index);
1103 }
1104
1105 if (region->exit)
1106 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1107 region->exit->index);
1108 else
1109 fprintf (file, "%*s[no exit marker]\n", indent, "");
1110
1111 if (region->next)
1112 dump_omp_region (file, region->next, indent);
1113 }
1114
1115 DEBUG_FUNCTION void
1116 debug_omp_region (struct omp_region *region)
1117 {
1118 dump_omp_region (stderr, region, 0);
1119 }
1120
1121 DEBUG_FUNCTION void
1122 debug_all_omp_regions (void)
1123 {
1124 dump_omp_region (stderr, root_omp_region, 0);
1125 }
1126
1127
1128 /* Create a new parallel region starting at STMT inside region PARENT. */
1129
1130 struct omp_region *
1131 new_omp_region (basic_block bb, enum gimple_code type,
1132 struct omp_region *parent)
1133 {
1134 struct omp_region *region = XCNEW (struct omp_region);
1135
1136 region->outer = parent;
1137 region->entry = bb;
1138 region->type = type;
1139
1140 if (parent)
1141 {
1142 /* This is a nested region. Add it to the list of inner
1143 regions in PARENT. */
1144 region->next = parent->inner;
1145 parent->inner = region;
1146 }
1147 else
1148 {
1149 /* This is a toplevel region. Add it to the list of toplevel
1150 regions in ROOT_OMP_REGION. */
1151 region->next = root_omp_region;
1152 root_omp_region = region;
1153 }
1154
1155 return region;
1156 }
1157
1158 /* Release the memory associated with the region tree rooted at REGION. */
1159
1160 static void
1161 free_omp_region_1 (struct omp_region *region)
1162 {
1163 struct omp_region *i, *n;
1164
1165 for (i = region->inner; i ; i = n)
1166 {
1167 n = i->next;
1168 free_omp_region_1 (i);
1169 }
1170
1171 free (region);
1172 }
1173
1174 /* Release the memory for the entire omp region tree. */
1175
1176 void
1177 free_omp_regions (void)
1178 {
1179 struct omp_region *r, *n;
1180 for (r = root_omp_region; r ; r = n)
1181 {
1182 n = r->next;
1183 free_omp_region_1 (r);
1184 }
1185 root_omp_region = NULL;
1186 }
1187
1188
1189 /* Create a new context, with OUTER_CTX being the surrounding context. */
1190
1191 static omp_context *
1192 new_omp_context (gimple stmt, omp_context *outer_ctx)
1193 {
1194 omp_context *ctx = XCNEW (omp_context);
1195
1196 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1197 (splay_tree_value) ctx);
1198 ctx->stmt = stmt;
1199
1200 if (outer_ctx)
1201 {
1202 ctx->outer = outer_ctx;
1203 ctx->cb = outer_ctx->cb;
1204 ctx->cb.block = NULL;
1205 ctx->depth = outer_ctx->depth + 1;
1206 }
1207 else
1208 {
1209 ctx->cb.src_fn = current_function_decl;
1210 ctx->cb.dst_fn = current_function_decl;
1211 ctx->cb.src_node = cgraph_node (current_function_decl);
1212 ctx->cb.dst_node = ctx->cb.src_node;
1213 ctx->cb.src_cfun = cfun;
1214 ctx->cb.copy_decl = omp_copy_decl;
1215 ctx->cb.eh_lp_nr = 0;
1216 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1217 ctx->depth = 1;
1218 }
1219
1220 ctx->cb.decl_map = pointer_map_create ();
1221
1222 return ctx;
1223 }
1224
1225 static gimple_seq maybe_catch_exception (gimple_seq);
1226
1227 /* Finalize task copyfn. */
1228
1229 static void
1230 finalize_task_copyfn (gimple task_stmt)
1231 {
1232 struct function *child_cfun;
1233 tree child_fn, old_fn;
1234 gimple_seq seq, new_seq;
1235 gimple bind;
1236
1237 child_fn = gimple_omp_task_copy_fn (task_stmt);
1238 if (child_fn == NULL_TREE)
1239 return;
1240
1241 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1242
1243 /* Inform the callgraph about the new function. */
1244 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1245 = cfun->curr_properties;
1246
1247 old_fn = current_function_decl;
1248 push_cfun (child_cfun);
1249 current_function_decl = child_fn;
1250 bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1251 seq = gimple_seq_alloc ();
1252 gimple_seq_add_stmt (&seq, bind);
1253 new_seq = maybe_catch_exception (seq);
1254 if (new_seq != seq)
1255 {
1256 bind = gimple_build_bind (NULL, new_seq, NULL);
1257 seq = gimple_seq_alloc ();
1258 gimple_seq_add_stmt (&seq, bind);
1259 }
1260 gimple_set_body (child_fn, seq);
1261 pop_cfun ();
1262 current_function_decl = old_fn;
1263
1264 cgraph_add_new_function (child_fn, false);
1265 }
1266
1267 /* Destroy an omp_context data structure. Called through the splay tree
1268 value delete callback. */
1269
1270 static void
1271 delete_omp_context (splay_tree_value value)
1272 {
1273 omp_context *ctx = (omp_context *) value;
1274
1275 pointer_map_destroy (ctx->cb.decl_map);
1276
1277 if (ctx->field_map)
1278 splay_tree_delete (ctx->field_map);
1279 if (ctx->sfield_map)
1280 splay_tree_delete (ctx->sfield_map);
1281
1282 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1283 it produces corrupt debug information. */
1284 if (ctx->record_type)
1285 {
1286 tree t;
1287 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1288 DECL_ABSTRACT_ORIGIN (t) = NULL;
1289 }
1290 if (ctx->srecord_type)
1291 {
1292 tree t;
1293 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
1294 DECL_ABSTRACT_ORIGIN (t) = NULL;
1295 }
1296
1297 if (is_task_ctx (ctx))
1298 finalize_task_copyfn (ctx->stmt);
1299
1300 XDELETE (ctx);
1301 }
1302
1303 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1304 context. */
1305
1306 static void
1307 fixup_child_record_type (omp_context *ctx)
1308 {
1309 tree f, type = ctx->record_type;
1310
1311 /* ??? It isn't sufficient to just call remap_type here, because
1312 variably_modified_type_p doesn't work the way we expect for
1313 record types. Testing each field for whether it needs remapping
1314 and creating a new record by hand works, however. */
1315 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
1316 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1317 break;
1318 if (f)
1319 {
1320 tree name, new_fields = NULL;
1321
1322 type = lang_hooks.types.make_type (RECORD_TYPE);
1323 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1324 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1325 TYPE_DECL, name, type);
1326 TYPE_NAME (type) = name;
1327
1328 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
1329 {
1330 tree new_f = copy_node (f);
1331 DECL_CONTEXT (new_f) = type;
1332 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1333 TREE_CHAIN (new_f) = new_fields;
1334 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1335 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1336 &ctx->cb, NULL);
1337 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1338 &ctx->cb, NULL);
1339 new_fields = new_f;
1340
1341 /* Arrange to be able to look up the receiver field
1342 given the sender field. */
1343 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1344 (splay_tree_value) new_f);
1345 }
1346 TYPE_FIELDS (type) = nreverse (new_fields);
1347 layout_type (type);
1348 }
1349
1350 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1351 }
1352
1353 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1354 specified by CLAUSES. */
1355
1356 static void
1357 scan_sharing_clauses (tree clauses, omp_context *ctx)
1358 {
1359 tree c, decl;
1360 bool scan_array_reductions = false;
1361
1362 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1363 {
1364 bool by_ref;
1365
1366 switch (OMP_CLAUSE_CODE (c))
1367 {
1368 case OMP_CLAUSE_PRIVATE:
1369 decl = OMP_CLAUSE_DECL (c);
1370 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1371 goto do_private;
1372 else if (!is_variable_sized (decl))
1373 install_var_local (decl, ctx);
1374 break;
1375
1376 case OMP_CLAUSE_SHARED:
1377 gcc_assert (is_taskreg_ctx (ctx));
1378 decl = OMP_CLAUSE_DECL (c);
1379 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1380 || !is_variable_sized (decl));
1381 /* Global variables don't need to be copied,
1382 the receiver side will use them directly. */
1383 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1384 break;
1385 by_ref = use_pointer_for_field (decl, ctx);
1386 if (! TREE_READONLY (decl)
1387 || TREE_ADDRESSABLE (decl)
1388 || by_ref
1389 || is_reference (decl))
1390 {
1391 install_var_field (decl, by_ref, 3, ctx);
1392 install_var_local (decl, ctx);
1393 break;
1394 }
1395 /* We don't need to copy const scalar vars back. */
1396 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1397 goto do_private;
1398
1399 case OMP_CLAUSE_LASTPRIVATE:
1400 /* Let the corresponding firstprivate clause create
1401 the variable. */
1402 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1403 break;
1404 /* FALLTHRU */
1405
1406 case OMP_CLAUSE_FIRSTPRIVATE:
1407 case OMP_CLAUSE_REDUCTION:
1408 decl = OMP_CLAUSE_DECL (c);
1409 do_private:
1410 if (is_variable_sized (decl))
1411 {
1412 if (is_task_ctx (ctx))
1413 install_var_field (decl, false, 1, ctx);
1414 break;
1415 }
1416 else if (is_taskreg_ctx (ctx))
1417 {
1418 bool global
1419 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1420 by_ref = use_pointer_for_field (decl, NULL);
1421
1422 if (is_task_ctx (ctx)
1423 && (global || by_ref || is_reference (decl)))
1424 {
1425 install_var_field (decl, false, 1, ctx);
1426 if (!global)
1427 install_var_field (decl, by_ref, 2, ctx);
1428 }
1429 else if (!global)
1430 install_var_field (decl, by_ref, 3, ctx);
1431 }
1432 install_var_local (decl, ctx);
1433 break;
1434
1435 case OMP_CLAUSE_COPYPRIVATE:
1436 case OMP_CLAUSE_COPYIN:
1437 decl = OMP_CLAUSE_DECL (c);
1438 by_ref = use_pointer_for_field (decl, NULL);
1439 install_var_field (decl, by_ref, 3, ctx);
1440 break;
1441
1442 case OMP_CLAUSE_DEFAULT:
1443 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1444 break;
1445
1446 case OMP_CLAUSE_IF:
1447 case OMP_CLAUSE_NUM_THREADS:
1448 case OMP_CLAUSE_SCHEDULE:
1449 if (ctx->outer)
1450 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1451 break;
1452
1453 case OMP_CLAUSE_NOWAIT:
1454 case OMP_CLAUSE_ORDERED:
1455 case OMP_CLAUSE_COLLAPSE:
1456 case OMP_CLAUSE_UNTIED:
1457 break;
1458
1459 default:
1460 gcc_unreachable ();
1461 }
1462 }
1463
1464 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1465 {
1466 switch (OMP_CLAUSE_CODE (c))
1467 {
1468 case OMP_CLAUSE_LASTPRIVATE:
1469 /* Let the corresponding firstprivate clause create
1470 the variable. */
1471 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1472 scan_array_reductions = true;
1473 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1474 break;
1475 /* FALLTHRU */
1476
1477 case OMP_CLAUSE_PRIVATE:
1478 case OMP_CLAUSE_FIRSTPRIVATE:
1479 case OMP_CLAUSE_REDUCTION:
1480 decl = OMP_CLAUSE_DECL (c);
1481 if (is_variable_sized (decl))
1482 install_var_local (decl, ctx);
1483 fixup_remapped_decl (decl, ctx,
1484 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1485 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1486 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1487 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1488 scan_array_reductions = true;
1489 break;
1490
1491 case OMP_CLAUSE_SHARED:
1492 decl = OMP_CLAUSE_DECL (c);
1493 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1494 fixup_remapped_decl (decl, ctx, false);
1495 break;
1496
1497 case OMP_CLAUSE_COPYPRIVATE:
1498 case OMP_CLAUSE_COPYIN:
1499 case OMP_CLAUSE_DEFAULT:
1500 case OMP_CLAUSE_IF:
1501 case OMP_CLAUSE_NUM_THREADS:
1502 case OMP_CLAUSE_SCHEDULE:
1503 case OMP_CLAUSE_NOWAIT:
1504 case OMP_CLAUSE_ORDERED:
1505 case OMP_CLAUSE_COLLAPSE:
1506 case OMP_CLAUSE_UNTIED:
1507 break;
1508
1509 default:
1510 gcc_unreachable ();
1511 }
1512 }
1513
1514 if (scan_array_reductions)
1515 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1516 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1517 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1518 {
1519 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1520 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1521 }
1522 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1523 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1524 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1525 }
1526
1527 /* Create a new name for the omp child function. Returns an identifier. */
1528
1529 static GTY(()) unsigned int tmp_ompfn_id_num;
1530
1531 static tree
1532 create_omp_child_function_name (bool task_copy)
1533 {
1534 return (clone_function_name (current_function_decl,
1535 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1536 }
1537
1538 /* Build a decl for the omp child function. It'll not contain a body
1539 yet, just the bare decl. */
1540
1541 static void
1542 create_omp_child_function (omp_context *ctx, bool task_copy)
1543 {
1544 tree decl, type, name, t;
1545
1546 name = create_omp_child_function_name (task_copy);
1547 if (task_copy)
1548 type = build_function_type_list (void_type_node, ptr_type_node,
1549 ptr_type_node, NULL_TREE);
1550 else
1551 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1552
1553 decl = build_decl (gimple_location (ctx->stmt),
1554 FUNCTION_DECL, name, type);
1555
1556 if (!task_copy)
1557 ctx->cb.dst_fn = decl;
1558 else
1559 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1560
1561 TREE_STATIC (decl) = 1;
1562 TREE_USED (decl) = 1;
1563 DECL_ARTIFICIAL (decl) = 1;
1564 DECL_IGNORED_P (decl) = 0;
1565 TREE_PUBLIC (decl) = 0;
1566 DECL_UNINLINABLE (decl) = 1;
1567 DECL_EXTERNAL (decl) = 0;
1568 DECL_CONTEXT (decl) = NULL_TREE;
1569 DECL_INITIAL (decl) = make_node (BLOCK);
1570
1571 t = build_decl (DECL_SOURCE_LOCATION (decl),
1572 RESULT_DECL, NULL_TREE, void_type_node);
1573 DECL_ARTIFICIAL (t) = 1;
1574 DECL_IGNORED_P (t) = 1;
1575 DECL_CONTEXT (t) = decl;
1576 DECL_RESULT (decl) = t;
1577
1578 t = build_decl (DECL_SOURCE_LOCATION (decl),
1579 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1580 DECL_ARTIFICIAL (t) = 1;
1581 DECL_ARG_TYPE (t) = ptr_type_node;
1582 DECL_CONTEXT (t) = current_function_decl;
1583 TREE_USED (t) = 1;
1584 DECL_ARGUMENTS (decl) = t;
1585 if (!task_copy)
1586 ctx->receiver_decl = t;
1587 else
1588 {
1589 t = build_decl (DECL_SOURCE_LOCATION (decl),
1590 PARM_DECL, get_identifier (".omp_data_o"),
1591 ptr_type_node);
1592 DECL_ARTIFICIAL (t) = 1;
1593 DECL_ARG_TYPE (t) = ptr_type_node;
1594 DECL_CONTEXT (t) = current_function_decl;
1595 TREE_USED (t) = 1;
1596 TREE_ADDRESSABLE (t) = 1;
1597 TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
1598 DECL_ARGUMENTS (decl) = t;
1599 }
1600
1601 /* Allocate memory for the function structure. The call to
1602 allocate_struct_function clobbers CFUN, so we need to restore
1603 it afterward. */
1604 push_struct_function (decl);
1605 cfun->function_end_locus = gimple_location (ctx->stmt);
1606 pop_cfun ();
1607 }
1608
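/* Rough shape of the decls built above (names illustrative; the real names
   come from clone_function_name, and the parameters are the ".omp_data_i"
   and ".omp_data_o" pointers installed above):  */
void example_omp_fn (void *dot_omp_data_i);			/* !task_copy  */
void example_omp_cpyfn (void *dot_omp_data_o,
			void *dot_omp_data_i);			/* task_copy  */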
1609
1610 /* Scan an OpenMP parallel directive. */
1611
1612 static void
1613 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1614 {
1615 omp_context *ctx;
1616 tree name;
1617 gimple stmt = gsi_stmt (*gsi);
1618
1619 /* Ignore parallel directives with empty bodies, unless there
1620 are copyin clauses. */
1621 if (optimize > 0
1622 && empty_body_p (gimple_omp_body (stmt))
1623 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1624 OMP_CLAUSE_COPYIN) == NULL)
1625 {
1626 gsi_replace (gsi, gimple_build_nop (), false);
1627 return;
1628 }
1629
1630 ctx = new_omp_context (stmt, outer_ctx);
1631 if (taskreg_nesting_level > 1)
1632 ctx->is_nested = true;
1633 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1634 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1635 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1636 name = create_tmp_var_name (".omp_data_s");
1637 name = build_decl (gimple_location (stmt),
1638 TYPE_DECL, name, ctx->record_type);
1639 TYPE_NAME (ctx->record_type) = name;
1640 create_omp_child_function (ctx, false);
1641 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1642
1643 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1644 scan_omp (gimple_omp_body (stmt), ctx);
1645
1646 if (TYPE_FIELDS (ctx->record_type) == NULL)
1647 ctx->record_type = ctx->receiver_decl = NULL;
1648 else
1649 {
1650 layout_type (ctx->record_type);
1651 fixup_child_record_type (ctx);
1652 }
1653 }
1654
1655 /* Scan an OpenMP task directive. */
1656
1657 static void
1658 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1659 {
1660 omp_context *ctx;
1661 tree name, t;
1662 gimple stmt = gsi_stmt (*gsi);
1663 location_t loc = gimple_location (stmt);
1664
1665 /* Ignore task directives with empty bodies. */
1666 if (optimize > 0
1667 && empty_body_p (gimple_omp_body (stmt)))
1668 {
1669 gsi_replace (gsi, gimple_build_nop (), false);
1670 return;
1671 }
1672
1673 ctx = new_omp_context (stmt, outer_ctx);
1674 if (taskreg_nesting_level > 1)
1675 ctx->is_nested = true;
1676 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1677 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1678 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1679 name = create_tmp_var_name (".omp_data_s");
1680 name = build_decl (gimple_location (stmt),
1681 TYPE_DECL, name, ctx->record_type);
1682 TYPE_NAME (ctx->record_type) = name;
1683 create_omp_child_function (ctx, false);
1684 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1685
1686 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1687
1688 if (ctx->srecord_type)
1689 {
1690 name = create_tmp_var_name (".omp_data_a");
1691 name = build_decl (gimple_location (stmt),
1692 TYPE_DECL, name, ctx->srecord_type);
1693 TYPE_NAME (ctx->srecord_type) = name;
1694 create_omp_child_function (ctx, true);
1695 }
1696
1697 scan_omp (gimple_omp_body (stmt), ctx);
1698
1699 if (TYPE_FIELDS (ctx->record_type) == NULL)
1700 {
1701 ctx->record_type = ctx->receiver_decl = NULL;
1702 t = build_int_cst (long_integer_type_node, 0);
1703 gimple_omp_task_set_arg_size (stmt, t);
1704 t = build_int_cst (long_integer_type_node, 1);
1705 gimple_omp_task_set_arg_align (stmt, t);
1706 }
1707 else
1708 {
1709 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1710 /* Move VLA fields to the end. */
1711 p = &TYPE_FIELDS (ctx->record_type);
1712 while (*p)
1713 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1714 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1715 {
1716 *q = *p;
1717 *p = TREE_CHAIN (*p);
1718 TREE_CHAIN (*q) = NULL_TREE;
1719 q = &TREE_CHAIN (*q);
1720 }
1721 else
1722 p = &TREE_CHAIN (*p);
1723 *p = vla_fields;
1724 layout_type (ctx->record_type);
1725 fixup_child_record_type (ctx);
1726 if (ctx->srecord_type)
1727 layout_type (ctx->srecord_type);
1728 t = fold_convert_loc (loc, long_integer_type_node,
1729 TYPE_SIZE_UNIT (ctx->record_type));
1730 gimple_omp_task_set_arg_size (stmt, t);
1731 t = build_int_cst (long_integer_type_node,
1732 TYPE_ALIGN_UNIT (ctx->record_type));
1733 gimple_omp_task_set_arg_align (stmt, t);
1734 }
1735 }
1736
1737
1738 /* Scan an OpenMP loop directive. */
1739
1740 static void
1741 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1742 {
1743 omp_context *ctx;
1744 size_t i;
1745
1746 ctx = new_omp_context (stmt, outer_ctx);
1747
1748 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1749
1750 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1751 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1752 {
1753 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1754 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1755 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1756 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1757 }
1758 scan_omp (gimple_omp_body (stmt), ctx);
1759 }
1760
1761 /* Scan an OpenMP sections directive. */
1762
1763 static void
1764 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1765 {
1766 omp_context *ctx;
1767
1768 ctx = new_omp_context (stmt, outer_ctx);
1769 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1770 scan_omp (gimple_omp_body (stmt), ctx);
1771 }
1772
1773 /* Scan an OpenMP single directive. */
1774
1775 static void
1776 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1777 {
1778 omp_context *ctx;
1779 tree name;
1780
1781 ctx = new_omp_context (stmt, outer_ctx);
1782 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1783 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1784 name = create_tmp_var_name (".omp_copy_s");
1785 name = build_decl (gimple_location (stmt),
1786 TYPE_DECL, name, ctx->record_type);
1787 TYPE_NAME (ctx->record_type) = name;
1788
1789 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1790 scan_omp (gimple_omp_body (stmt), ctx);
1791
1792 if (TYPE_FIELDS (ctx->record_type) == NULL)
1793 ctx->record_type = NULL;
1794 else
1795 layout_type (ctx->record_type);
1796 }
1797
1798
1799 /* Check OpenMP nesting restrictions. */
1800 static void
1801 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1802 {
1803 switch (gimple_code (stmt))
1804 {
1805 case GIMPLE_OMP_FOR:
1806 case GIMPLE_OMP_SECTIONS:
1807 case GIMPLE_OMP_SINGLE:
1808 case GIMPLE_CALL:
1809 for (; ctx != NULL; ctx = ctx->outer)
1810 switch (gimple_code (ctx->stmt))
1811 {
1812 case GIMPLE_OMP_FOR:
1813 case GIMPLE_OMP_SECTIONS:
1814 case GIMPLE_OMP_SINGLE:
1815 case GIMPLE_OMP_ORDERED:
1816 case GIMPLE_OMP_MASTER:
1817 case GIMPLE_OMP_TASK:
1818 if (is_gimple_call (stmt))
1819 {
1820 warning (0, "barrier region may not be closely nested inside "
1821 "of work-sharing, critical, ordered, master or "
1822 "explicit task region");
1823 return;
1824 }
1825 warning (0, "work-sharing region may not be closely nested inside "
1826 "of work-sharing, critical, ordered, master or explicit "
1827 "task region");
1828 return;
1829 case GIMPLE_OMP_PARALLEL:
1830 return;
1831 default:
1832 break;
1833 }
1834 break;
1835 case GIMPLE_OMP_MASTER:
1836 for (; ctx != NULL; ctx = ctx->outer)
1837 switch (gimple_code (ctx->stmt))
1838 {
1839 case GIMPLE_OMP_FOR:
1840 case GIMPLE_OMP_SECTIONS:
1841 case GIMPLE_OMP_SINGLE:
1842 case GIMPLE_OMP_TASK:
1843 warning (0, "master region may not be closely nested inside "
1844 "of work-sharing or explicit task region");
1845 return;
1846 case GIMPLE_OMP_PARALLEL:
1847 return;
1848 default:
1849 break;
1850 }
1851 break;
1852 case GIMPLE_OMP_ORDERED:
1853 for (; ctx != NULL; ctx = ctx->outer)
1854 switch (gimple_code (ctx->stmt))
1855 {
1856 case GIMPLE_OMP_CRITICAL:
1857 case GIMPLE_OMP_TASK:
1858 warning (0, "ordered region may not be closely nested inside "
1859 "of critical or explicit task region");
1860 return;
1861 case GIMPLE_OMP_FOR:
1862 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1863 OMP_CLAUSE_ORDERED) == NULL)
1864 warning (0, "ordered region must be closely nested inside "
1865 "a loop region with an ordered clause");
1866 return;
1867 case GIMPLE_OMP_PARALLEL:
1868 return;
1869 default:
1870 break;
1871 }
1872 break;
1873 case GIMPLE_OMP_CRITICAL:
1874 for (; ctx != NULL; ctx = ctx->outer)
1875 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1876 && (gimple_omp_critical_name (stmt)
1877 == gimple_omp_critical_name (ctx->stmt)))
1878 {
1879 warning (0, "critical region may not be nested inside a critical "
1880 "region with the same name");
1881 return;
1882 }
1883 break;
1884 default:
1885 break;
1886 }
1887 }
1888
1889
1890 /* Helper function for scan_omp.
1891
1892 Callback for walk_tree or operands in walk_gimple_stmt used to
1893 scan for OpenMP directives in TP. */
1894
1895 static tree
1896 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1897 {
1898 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1899 omp_context *ctx = (omp_context *) wi->info;
1900 tree t = *tp;
1901
1902 switch (TREE_CODE (t))
1903 {
1904 case VAR_DECL:
1905 case PARM_DECL:
1906 case LABEL_DECL:
1907 case RESULT_DECL:
1908 if (ctx)
1909 *tp = remap_decl (t, &ctx->cb);
1910 break;
1911
1912 default:
1913 if (ctx && TYPE_P (t))
1914 *tp = remap_type (t, &ctx->cb);
1915 else if (!DECL_P (t))
1916 {
1917 *walk_subtrees = 1;
1918 if (ctx)
1919 {
1920 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1921 if (tem != TREE_TYPE (t))
1922 {
1923 if (TREE_CODE (t) == INTEGER_CST)
1924 *tp = build_int_cst_wide (tem,
1925 TREE_INT_CST_LOW (t),
1926 TREE_INT_CST_HIGH (t));
1927 else
1928 TREE_TYPE (t) = tem;
1929 }
1930 }
1931 }
1932 break;
1933 }
1934
1935 return NULL_TREE;
1936 }
1937
1938
1939 /* Helper function for scan_omp.
1940
1941 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1942 the current statement in GSI. */
1943
1944 static tree
1945 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1946 struct walk_stmt_info *wi)
1947 {
1948 gimple stmt = gsi_stmt (*gsi);
1949 omp_context *ctx = (omp_context *) wi->info;
1950
1951 if (gimple_has_location (stmt))
1952 input_location = gimple_location (stmt);
1953
1954 /* Check the OpenMP nesting restrictions. */
1955 if (ctx != NULL)
1956 {
1957 if (is_gimple_omp (stmt))
1958 check_omp_nesting_restrictions (stmt, ctx);
1959 else if (is_gimple_call (stmt))
1960 {
1961 tree fndecl = gimple_call_fndecl (stmt);
1962 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1963 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1964 check_omp_nesting_restrictions (stmt, ctx);
1965 }
1966 }
1967
1968 *handled_ops_p = true;
1969
1970 switch (gimple_code (stmt))
1971 {
1972 case GIMPLE_OMP_PARALLEL:
1973 taskreg_nesting_level++;
1974 scan_omp_parallel (gsi, ctx);
1975 taskreg_nesting_level--;
1976 break;
1977
1978 case GIMPLE_OMP_TASK:
1979 taskreg_nesting_level++;
1980 scan_omp_task (gsi, ctx);
1981 taskreg_nesting_level--;
1982 break;
1983
1984 case GIMPLE_OMP_FOR:
1985 scan_omp_for (stmt, ctx);
1986 break;
1987
1988 case GIMPLE_OMP_SECTIONS:
1989 scan_omp_sections (stmt, ctx);
1990 break;
1991
1992 case GIMPLE_OMP_SINGLE:
1993 scan_omp_single (stmt, ctx);
1994 break;
1995
1996 case GIMPLE_OMP_SECTION:
1997 case GIMPLE_OMP_MASTER:
1998 case GIMPLE_OMP_ORDERED:
1999 case GIMPLE_OMP_CRITICAL:
2000 ctx = new_omp_context (stmt, ctx);
2001 scan_omp (gimple_omp_body (stmt), ctx);
2002 break;
2003
2004 case GIMPLE_BIND:
2005 {
2006 tree var;
2007
2008 *handled_ops_p = false;
2009 if (ctx)
2010 for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
2011 insert_decl_map (&ctx->cb, var, var);
2012 }
2013 break;
2014 default:
2015 *handled_ops_p = false;
2016 break;
2017 }
2018
2019 return NULL_TREE;
2020 }
2021
2022
2023 /* Scan all the statements starting at the current statement. CTX
2024 contains context information about the OpenMP directives and
2025 clauses found during the scan. */
2026
2027 static void
2028 scan_omp (gimple_seq body, omp_context *ctx)
2029 {
2030 location_t saved_location;
2031 struct walk_stmt_info wi;
2032
2033 memset (&wi, 0, sizeof (wi));
2034 wi.info = ctx;
2035 wi.want_locations = true;
2036
2037 saved_location = input_location;
2038 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2039 input_location = saved_location;
2040 }
2041 \f
2042 /* Re-gimplification and code generation routines. */
2043
2044 /* Build a call to GOMP_barrier. */
2045
2046 static tree
2047 build_omp_barrier (void)
2048 {
2049 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2050 }
2051
2052 /* If a context was created for STMT when it was scanned, return it. */
2053
2054 static omp_context *
2055 maybe_lookup_ctx (gimple stmt)
2056 {
2057 splay_tree_node n;
2058 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2059 return n ? (omp_context *) n->value : NULL;
2060 }
2061
2062
2063 /* Find the mapping for DECL in CTX or the immediately enclosing
2064 context that has a mapping for DECL.
2065
2066 If CTX is a nested parallel directive, we may have to use the decl
2067 mappings created in CTX's parent context. Suppose that we have the
2068 following parallel nesting (variable UIDs shown for clarity):
2069
2070 iD.1562 = 0;
2071 #omp parallel shared(iD.1562) -> outer parallel
2072 iD.1562 = iD.1562 + 1;
2073
2074 #omp parallel shared (iD.1562) -> inner parallel
2075 iD.1562 = iD.1562 - 1;
2076
2077 Each parallel structure will create a distinct .omp_data_s structure
2078 for copying iD.1562 in/out of the directive:
2079
2080 outer parallel .omp_data_s.1.i -> iD.1562
2081 inner parallel .omp_data_s.2.i -> iD.1562
2082
2083 A shared variable mapping will produce a copy-out operation before
2084 the parallel directive and a copy-in operation after it. So, in
2085 this case we would have:
2086
2087 iD.1562 = 0;
2088 .omp_data_o.1.i = iD.1562;
2089 #omp parallel shared(iD.1562) -> outer parallel
2090 .omp_data_i.1 = &.omp_data_o.1
2091 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2092
2093 .omp_data_o.2.i = iD.1562; -> **
2094 #omp parallel shared(iD.1562) -> inner parallel
2095 .omp_data_i.2 = &.omp_data_o.2
2096 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2097
2098
2099 ** This is a problem. The symbol iD.1562 cannot be referenced
2100 inside the body of the outer parallel region. But since we are
2101 emitting this copy operation while expanding the inner parallel
2102 directive, we need to access the CTX structure of the outer
2103 parallel directive to get the correct mapping:
2104
2105 .omp_data_o.2.i = .omp_data_i.1->i
2106
2107 Since there may be other workshare or parallel directives enclosing
2108 the parallel directive, it may be necessary to walk up the context
2109 parent chain. This is not a problem in general because nested
2110 parallelism happens only rarely. */
2111
2112 static tree
2113 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2114 {
2115 tree t;
2116 omp_context *up;
2117
2118 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2119 t = maybe_lookup_decl (decl, up);
2120
2121 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2122
2123 return t ? t : decl;
2124 }
2125
2126
2127 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2128 in outer contexts. */
2129
2130 static tree
2131 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2132 {
2133 tree t = NULL;
2134 omp_context *up;
2135
2136 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2137 t = maybe_lookup_decl (decl, up);
2138
2139 return t ? t : decl;
2140 }
2141
2142
2143 /* Construct the initialization value for reduction CLAUSE. */
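/* A sketch of the mapping implemented by the switch below: the private
   copy for reduction(+:x), (-:x), (|:x), (^:x) and (||:x) starts at 0,
   for (*:x) and (&&:x) at 1, for (&:x) at ~0, and for max/min at the
   smallest/largest value representable in the type; e.g. for
   "int x; ... reduction(max:x)" each thread's copy starts at INT_MIN. */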
2144
2145 tree
2146 omp_reduction_init (tree clause, tree type)
2147 {
2148 location_t loc = OMP_CLAUSE_LOCATION (clause);
2149 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2150 {
2151 case PLUS_EXPR:
2152 case MINUS_EXPR:
2153 case BIT_IOR_EXPR:
2154 case BIT_XOR_EXPR:
2155 case TRUTH_OR_EXPR:
2156 case TRUTH_ORIF_EXPR:
2157 case TRUTH_XOR_EXPR:
2158 case NE_EXPR:
2159 return fold_convert_loc (loc, type, integer_zero_node);
2160
2161 case MULT_EXPR:
2162 case TRUTH_AND_EXPR:
2163 case TRUTH_ANDIF_EXPR:
2164 case EQ_EXPR:
2165 return fold_convert_loc (loc, type, integer_one_node);
2166
2167 case BIT_AND_EXPR:
2168 return fold_convert_loc (loc, type, integer_minus_one_node);
2169
2170 case MAX_EXPR:
2171 if (SCALAR_FLOAT_TYPE_P (type))
2172 {
2173 REAL_VALUE_TYPE max, min;
2174 if (HONOR_INFINITIES (TYPE_MODE (type)))
2175 {
2176 real_inf (&max);
2177 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2178 }
2179 else
2180 real_maxval (&min, 1, TYPE_MODE (type));
2181 return build_real (type, min);
2182 }
2183 else
2184 {
2185 gcc_assert (INTEGRAL_TYPE_P (type));
2186 return TYPE_MIN_VALUE (type);
2187 }
2188
2189 case MIN_EXPR:
2190 if (SCALAR_FLOAT_TYPE_P (type))
2191 {
2192 REAL_VALUE_TYPE max;
2193 if (HONOR_INFINITIES (TYPE_MODE (type)))
2194 real_inf (&max);
2195 else
2196 real_maxval (&max, 0, TYPE_MODE (type));
2197 return build_real (type, max);
2198 }
2199 else
2200 {
2201 gcc_assert (INTEGRAL_TYPE_P (type));
2202 return TYPE_MAX_VALUE (type);
2203 }
2204
2205 default:
2206 gcc_unreachable ();
2207 }
2208 }
2209
2210 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2211 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2212 private variables. Initialization statements go in ILIST, while calls
2213 to destructors go in DLIST. */
2214
2215 static void
2216 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2217 omp_context *ctx)
2218 {
2219 gimple_stmt_iterator diter;
2220 tree c, dtor, copyin_seq, x, ptr;
2221 bool copyin_by_ref = false;
2222 bool lastprivate_firstprivate = false;
2223 int pass;
2224
2225 *dlist = gimple_seq_alloc ();
2226 diter = gsi_start (*dlist);
2227 copyin_seq = NULL;
2228
2229 /* Do all the fixed sized types in the first pass, and the variable sized
2230 types in the second pass. This makes sure that the scalar arguments to
2231 the variable sized types are processed before we use them in the
2232 variable sized operations. */
2233 for (pass = 0; pass < 2; ++pass)
2234 {
2235 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2236 {
2237 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2238 tree var, new_var;
2239 bool by_ref;
2240 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2241
2242 switch (c_kind)
2243 {
2244 case OMP_CLAUSE_PRIVATE:
2245 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2246 continue;
2247 break;
2248 case OMP_CLAUSE_SHARED:
2249 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2250 {
2251 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2252 continue;
2253 }
2254 case OMP_CLAUSE_FIRSTPRIVATE:
2255 case OMP_CLAUSE_COPYIN:
2256 case OMP_CLAUSE_REDUCTION:
2257 break;
2258 case OMP_CLAUSE_LASTPRIVATE:
2259 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2260 {
2261 lastprivate_firstprivate = true;
2262 if (pass != 0)
2263 continue;
2264 }
2265 break;
2266 default:
2267 continue;
2268 }
2269
2270 new_var = var = OMP_CLAUSE_DECL (c);
2271 if (c_kind != OMP_CLAUSE_COPYIN)
2272 new_var = lookup_decl (var, ctx);
2273
2274 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2275 {
2276 if (pass != 0)
2277 continue;
2278 }
2279 else if (is_variable_sized (var))
2280 {
2281 /* For variable sized types, we need to allocate the
2282 actual storage here. Call alloca and store the
2283 result in the pointer decl that we created elsewhere. */
2284 if (pass == 0)
2285 continue;
2286
2287 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2288 {
2289 gimple stmt;
2290 tree tmp;
2291
2292 ptr = DECL_VALUE_EXPR (new_var);
2293 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2294 ptr = TREE_OPERAND (ptr, 0);
2295 gcc_assert (DECL_P (ptr));
2296 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2297
2298 /* void *tmp = __builtin_alloca */
2299 stmt
2300 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2301 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2302 gimple_add_tmp_var (tmp);
2303 gimple_call_set_lhs (stmt, tmp);
2304
2305 gimple_seq_add_stmt (ilist, stmt);
2306
2307 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2308 gimplify_assign (ptr, x, ilist);
2309 }
2310 }
2311 else if (is_reference (var))
2312 {
2313 /* For references that are being privatized for Fortran,
2314 allocate new backing storage for the new pointer
2315 variable. This allows us to avoid changing all the
2316 code that expects a pointer to something that expects
2317 a direct variable. Note that this doesn't apply to
2318 C++, since reference types are disallowed in data
2319 sharing clauses there, except for NRV optimized
2320 return values. */
2321 if (pass == 0)
2322 continue;
2323
2324 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2325 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2326 {
2327 x = build_receiver_ref (var, false, ctx);
2328 x = build_fold_addr_expr_loc (clause_loc, x);
2329 }
2330 else if (TREE_CONSTANT (x))
2331 {
2332 const char *name = NULL;
2333 if (DECL_NAME (var))
2334 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2335
2336 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2337 name);
2338 gimple_add_tmp_var (x);
2339 TREE_ADDRESSABLE (x) = 1;
2340 x = build_fold_addr_expr_loc (clause_loc, x);
2341 }
2342 else
2343 {
2344 x = build_call_expr_loc (clause_loc,
2345 built_in_decls[BUILT_IN_ALLOCA], 1, x);
2346 }
2347
2348 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2349 gimplify_assign (new_var, x, ilist);
2350
2351 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2352 }
2353 else if (c_kind == OMP_CLAUSE_REDUCTION
2354 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2355 {
2356 if (pass == 0)
2357 continue;
2358 }
2359 else if (pass != 0)
2360 continue;
2361
2362 switch (OMP_CLAUSE_CODE (c))
2363 {
2364 case OMP_CLAUSE_SHARED:
2365 /* Shared global vars are just accessed directly. */
2366 if (is_global_var (new_var))
2367 break;
2368 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2369 needs to be delayed until after fixup_child_record_type so
2370 that we get the correct type during the dereference. */
2371 by_ref = use_pointer_for_field (var, ctx);
2372 x = build_receiver_ref (var, by_ref, ctx);
2373 SET_DECL_VALUE_EXPR (new_var, x);
2374 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2375
2376 /* ??? If VAR is not passed by reference, and the variable
2377 hasn't been initialized yet, then we'll get a warning for
2378 the store into the omp_data_s structure. Ideally, we'd be
2379 able to notice this and not store anything at all, but
2380 we're generating code too early. Suppress the warning. */
2381 if (!by_ref)
2382 TREE_NO_WARNING (var) = 1;
2383 break;
2384
2385 case OMP_CLAUSE_LASTPRIVATE:
2386 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2387 break;
2388 /* FALLTHRU */
2389
2390 case OMP_CLAUSE_PRIVATE:
2391 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2392 x = build_outer_var_ref (var, ctx);
2393 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2394 {
2395 if (is_task_ctx (ctx))
2396 x = build_receiver_ref (var, false, ctx);
2397 else
2398 x = build_outer_var_ref (var, ctx);
2399 }
2400 else
2401 x = NULL;
2402 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2403 if (x)
2404 gimplify_and_add (x, ilist);
2405 /* FALLTHRU */
2406
2407 do_dtor:
2408 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2409 if (x)
2410 {
2411 gimple_seq tseq = NULL;
2412
2413 dtor = x;
2414 gimplify_stmt (&dtor, &tseq);
2415 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2416 }
2417 break;
2418
2419 case OMP_CLAUSE_FIRSTPRIVATE:
2420 if (is_task_ctx (ctx))
2421 {
2422 if (is_reference (var) || is_variable_sized (var))
2423 goto do_dtor;
2424 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2425 ctx))
2426 || use_pointer_for_field (var, NULL))
2427 {
2428 x = build_receiver_ref (var, false, ctx);
2429 SET_DECL_VALUE_EXPR (new_var, x);
2430 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2431 goto do_dtor;
2432 }
2433 }
2434 x = build_outer_var_ref (var, ctx);
2435 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2436 gimplify_and_add (x, ilist);
2437 goto do_dtor;
2438 break;
2439
2440 case OMP_CLAUSE_COPYIN:
2441 by_ref = use_pointer_for_field (var, NULL);
2442 x = build_receiver_ref (var, by_ref, ctx);
2443 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2444 append_to_statement_list (x, &copyin_seq);
2445 copyin_by_ref |= by_ref;
2446 break;
2447
2448 case OMP_CLAUSE_REDUCTION:
2449 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2450 {
2451 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2452 x = build_outer_var_ref (var, ctx);
2453
2454 if (is_reference (var))
2455 x = build_fold_addr_expr_loc (clause_loc, x);
2456 SET_DECL_VALUE_EXPR (placeholder, x);
2457 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2458 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2459 gimple_seq_add_seq (ilist,
2460 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2461 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2462 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2463 }
2464 else
2465 {
2466 x = omp_reduction_init (c, TREE_TYPE (new_var));
2467 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2468 gimplify_assign (new_var, x, ilist);
2469 }
2470 break;
2471
2472 default:
2473 gcc_unreachable ();
2474 }
2475 }
2476 }
2477
2478 /* The copyin sequence is not to be executed by the main thread, since
2479 that would result in self-copies. Perhaps not visible to scalars,
2480 but it certainly is to C++ operator=. */
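	  /* I.e. the guard generated here is roughly

		if (__builtin_omp_get_thread_num () != 0)
		  { <copyin assignments> }

	     so only the non-master threads execute the copyin sequence. */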
2481 if (copyin_seq)
2482 {
2483 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2484 x = build2 (NE_EXPR, boolean_type_node, x,
2485 build_int_cst (TREE_TYPE (x), 0));
2486 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2487 gimplify_and_add (x, ilist);
2488 }
2489
2490 /* If any copyin variable is passed by reference, we must ensure the
2491 master thread doesn't modify it before it is copied over in all
2492 threads. Similarly for variables in both firstprivate and
2493 lastprivate clauses we need to ensure the lastprivate copying
2494 happens after firstprivate copying in all threads. */
2495 if (copyin_by_ref || lastprivate_firstprivate)
2496 gimplify_and_add (build_omp_barrier (), ilist);
2497 }
2498
2499
2500 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2501 both parallel and workshare constructs. PREDICATE may be NULL if it's
2502 always true. */
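/* The emitted shape is roughly

	if (PREDICATE) goto lab_true; else goto lab_done;
	lab_true:
	  orig_var = private_var;	<- one copy-back per lastprivate clause
	lab_done:

   where PREDICATE, when present, tests that this thread executed the
   sequentially last iteration or section. */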
2503
2504 static void
2505 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2506 omp_context *ctx)
2507 {
2508 tree x, c, label = NULL;
2509 bool par_clauses = false;
2510
2511 /* Early exit if there are no lastprivate clauses. */
2512 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2513 if (clauses == NULL)
2514 {
2515 /* If this was a workshare clause, see if it had been combined
2516 with its parallel. In that case, look for the clauses on the
2517 parallel statement itself. */
2518 if (is_parallel_ctx (ctx))
2519 return;
2520
2521 ctx = ctx->outer;
2522 if (ctx == NULL || !is_parallel_ctx (ctx))
2523 return;
2524
2525 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2526 OMP_CLAUSE_LASTPRIVATE);
2527 if (clauses == NULL)
2528 return;
2529 par_clauses = true;
2530 }
2531
2532 if (predicate)
2533 {
2534 gimple stmt;
2535 tree label_true, arm1, arm2;
2536
2537 label = create_artificial_label (UNKNOWN_LOCATION);
2538 label_true = create_artificial_label (UNKNOWN_LOCATION);
2539 arm1 = TREE_OPERAND (predicate, 0);
2540 arm2 = TREE_OPERAND (predicate, 1);
2541 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2542 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2543 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2544 label_true, label);
2545 gimple_seq_add_stmt (stmt_list, stmt);
2546 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2547 }
2548
2549 for (c = clauses; c ;)
2550 {
2551 tree var, new_var;
2552 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2553
2554 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2555 {
2556 var = OMP_CLAUSE_DECL (c);
2557 new_var = lookup_decl (var, ctx);
2558
2559 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2560 {
2561 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2562 gimple_seq_add_seq (stmt_list,
2563 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2564 }
2565 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2566
2567 x = build_outer_var_ref (var, ctx);
2568 if (is_reference (var))
2569 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2570 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2571 gimplify_and_add (x, stmt_list);
2572 }
2573 c = OMP_CLAUSE_CHAIN (c);
2574 if (c == NULL && !par_clauses)
2575 {
2576 /* If this was a workshare clause, see if it had been combined
2577 with its parallel. In that case, continue looking for the
2578 clauses also on the parallel statement itself. */
2579 if (is_parallel_ctx (ctx))
2580 break;
2581
2582 ctx = ctx->outer;
2583 if (ctx == NULL || !is_parallel_ctx (ctx))
2584 break;
2585
2586 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2587 OMP_CLAUSE_LASTPRIVATE);
2588 par_clauses = true;
2589 }
2590 }
2591
2592 if (label)
2593 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2594 }
2595
2596
2597 /* Generate code to implement the REDUCTION clauses. */
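/* Roughly, for a single scalar clause such as reduction(+:s) the merge
   emitted below is one atomic update,

	#pragma omp atomic
	s = s + s.priv;		<- s.priv stands for the thread's private copy

   while with several reduction clauses, or an array/placeholder reduction,
   the partial results are instead combined between calls to
   GOMP_atomic_start () and GOMP_atomic_end (). */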
2598
2599 static void
2600 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2601 {
2602 gimple_seq sub_seq = NULL;
2603 gimple stmt;
2604 tree x, c;
2605 int count = 0;
2606
2607 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2608 update in that case, otherwise use a lock. */
2609 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2610 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2611 {
2612 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2613 {
2614 /* Never use OMP_ATOMIC for array reductions. */
2615 count = -1;
2616 break;
2617 }
2618 count++;
2619 }
2620
2621 if (count == 0)
2622 return;
2623
2624 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2625 {
2626 tree var, ref, new_var;
2627 enum tree_code code;
2628 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2629
2630 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2631 continue;
2632
2633 var = OMP_CLAUSE_DECL (c);
2634 new_var = lookup_decl (var, ctx);
2635 if (is_reference (var))
2636 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2637 ref = build_outer_var_ref (var, ctx);
2638 code = OMP_CLAUSE_REDUCTION_CODE (c);
2639
2640 /* reduction(-:var) sums up the partial results, so it acts
2641 identically to reduction(+:var). */
2642 if (code == MINUS_EXPR)
2643 code = PLUS_EXPR;
2644
2645 if (count == 1)
2646 {
2647 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2648
2649 addr = save_expr (addr);
2650 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2651 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2652 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2653 gimplify_and_add (x, stmt_seqp);
2654 return;
2655 }
2656
2657 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2658 {
2659 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2660
2661 if (is_reference (var))
2662 ref = build_fold_addr_expr_loc (clause_loc, ref);
2663 SET_DECL_VALUE_EXPR (placeholder, ref);
2664 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2665 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2666 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2667 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2668 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2669 }
2670 else
2671 {
2672 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2673 ref = build_outer_var_ref (var, ctx);
2674 gimplify_assign (ref, x, &sub_seq);
2675 }
2676 }
2677
2678 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2679 gimple_seq_add_stmt (stmt_seqp, stmt);
2680
2681 gimple_seq_add_seq (stmt_seqp, sub_seq);
2682
2683 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2684 gimple_seq_add_stmt (stmt_seqp, stmt);
2685 }
2686
2687
2688 /* Generate code to implement the COPYPRIVATE clauses. */
2689
2690 static void
2691 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2692 omp_context *ctx)
2693 {
2694 tree c;
2695
2696 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2697 {
2698 tree var, new_var, ref, x;
2699 bool by_ref;
2700 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2701
2702 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2703 continue;
2704
2705 var = OMP_CLAUSE_DECL (c);
2706 by_ref = use_pointer_for_field (var, NULL);
2707
2708 ref = build_sender_ref (var, ctx);
2709 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2710 if (by_ref)
2711 {
2712 x = build_fold_addr_expr_loc (clause_loc, new_var);
2713 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2714 }
2715 gimplify_assign (ref, x, slist);
2716
2717 ref = build_receiver_ref (var, false, ctx);
2718 if (by_ref)
2719 {
2720 ref = fold_convert_loc (clause_loc,
2721 build_pointer_type (TREE_TYPE (new_var)),
2722 ref);
2723 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2724 }
2725 if (is_reference (var))
2726 {
2727 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2728 ref = build_simple_mem_ref_loc (clause_loc, ref);
2729 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2730 }
2731 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2732 gimplify_and_add (x, rlist);
2733 }
2734 }
2735
2736
2737 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2738 and REDUCTION from the sender (aka parent) side. */
2739
2740 static void
2741 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2742 omp_context *ctx)
2743 {
2744 tree c;
2745
2746 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2747 {
2748 tree val, ref, x, var;
2749 bool by_ref, do_in = false, do_out = false;
2750 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2751
2752 switch (OMP_CLAUSE_CODE (c))
2753 {
2754 case OMP_CLAUSE_PRIVATE:
2755 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2756 break;
2757 continue;
2758 case OMP_CLAUSE_FIRSTPRIVATE:
2759 case OMP_CLAUSE_COPYIN:
2760 case OMP_CLAUSE_LASTPRIVATE:
2761 case OMP_CLAUSE_REDUCTION:
2762 break;
2763 default:
2764 continue;
2765 }
2766
2767 val = OMP_CLAUSE_DECL (c);
2768 var = lookup_decl_in_outer_ctx (val, ctx);
2769
2770 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2771 && is_global_var (var))
2772 continue;
2773 if (is_variable_sized (val))
2774 continue;
2775 by_ref = use_pointer_for_field (val, NULL);
2776
2777 switch (OMP_CLAUSE_CODE (c))
2778 {
2779 case OMP_CLAUSE_PRIVATE:
2780 case OMP_CLAUSE_FIRSTPRIVATE:
2781 case OMP_CLAUSE_COPYIN:
2782 do_in = true;
2783 break;
2784
2785 case OMP_CLAUSE_LASTPRIVATE:
2786 if (by_ref || is_reference (val))
2787 {
2788 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2789 continue;
2790 do_in = true;
2791 }
2792 else
2793 {
2794 do_out = true;
2795 if (lang_hooks.decls.omp_private_outer_ref (val))
2796 do_in = true;
2797 }
2798 break;
2799
2800 case OMP_CLAUSE_REDUCTION:
2801 do_in = true;
2802 do_out = !(by_ref || is_reference (val));
2803 break;
2804
2805 default:
2806 gcc_unreachable ();
2807 }
2808
2809 if (do_in)
2810 {
2811 ref = build_sender_ref (val, ctx);
2812 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2813 gimplify_assign (ref, x, ilist);
2814 if (is_task_ctx (ctx))
2815 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2816 }
2817
2818 if (do_out)
2819 {
2820 ref = build_sender_ref (val, ctx);
2821 gimplify_assign (var, ref, olist);
2822 }
2823 }
2824 }
2825
2826 /* Generate code to implement SHARED from the sender (aka parent)
2827 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2828 list things that got automatically shared. */
2829
2830 static void
2831 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2832 {
2833 tree var, ovar, nvar, f, x, record_type;
2834
2835 if (ctx->record_type == NULL)
2836 return;
2837
2838 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2839 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
2840 {
2841 ovar = DECL_ABSTRACT_ORIGIN (f);
2842 nvar = maybe_lookup_decl (ovar, ctx);
2843 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2844 continue;
2845
2846 /* If CTX is a nested parallel directive, find the immediately
2847 enclosing parallel or workshare construct that contains a
2848 mapping for OVAR. */
2849 var = lookup_decl_in_outer_ctx (ovar, ctx);
2850
2851 if (use_pointer_for_field (ovar, ctx))
2852 {
2853 x = build_sender_ref (ovar, ctx);
2854 var = build_fold_addr_expr (var);
2855 gimplify_assign (x, var, ilist);
2856 }
2857 else
2858 {
2859 x = build_sender_ref (ovar, ctx);
2860 gimplify_assign (x, var, ilist);
2861
2862 if (!TREE_READONLY (var)
2863 /* We don't need to receive a new reference to a result
2864 or parm decl. In fact we may not store to it as we will
2865 invalidate any pending RSO and generate wrong gimple
2866 during inlining. */
2867 && !((TREE_CODE (var) == RESULT_DECL
2868 || TREE_CODE (var) == PARM_DECL)
2869 && DECL_BY_REFERENCE (var)))
2870 {
2871 x = build_sender_ref (ovar, ctx);
2872 gimplify_assign (var, x, olist);
2873 }
2874 }
2875 }
2876 }
2877
2878
2879 /* A convenience function to build an empty GIMPLE_COND with just the
2880 condition. */
2881
2882 static gimple
2883 gimple_build_cond_empty (tree cond)
2884 {
2885 enum tree_code pred_code;
2886 tree lhs, rhs;
2887
2888 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2889 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2890 }
2891
2892
2893 /* Build the function calls to GOMP_parallel_start etc. to actually
2894 generate the parallel operation. REGION is the parallel region
2895 being expanded. BB is the block where the code should be inserted.
2896 WS_ARGS will be set if this is a call to a combined parallel+workshare
2897 construct; it contains the list of additional arguments needed by
2898 the workshare construct. */
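/* In the plain (non-combined) case the sequence emitted here is roughly

	GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
	child_fn (&.omp_data_o);
	GOMP_parallel_end ();

   where num_threads is folded from the num_threads and if clauses below
   and zero means the runtime picks the thread count. */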
2899
2900 static void
2901 expand_parallel_call (struct omp_region *region, basic_block bb,
2902 gimple entry_stmt, tree ws_args)
2903 {
2904 tree t, t1, t2, val, cond, c, clauses;
2905 gimple_stmt_iterator gsi;
2906 gimple stmt;
2907 int start_ix;
2908 location_t clause_loc;
2909
2910 clauses = gimple_omp_parallel_clauses (entry_stmt);
2911
2912 /* Determine what flavor of GOMP_parallel_start we will be
2913 emitting. */
2914 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2915 if (is_combined_parallel (region))
2916 {
2917 switch (region->inner->type)
2918 {
2919 case GIMPLE_OMP_FOR:
2920 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2921 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2922 + (region->inner->sched_kind
2923 == OMP_CLAUSE_SCHEDULE_RUNTIME
2924 ? 3 : region->inner->sched_kind);
2925 break;
2926 case GIMPLE_OMP_SECTIONS:
2927 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2928 break;
2929 default:
2930 gcc_unreachable ();
2931 }
2932 }
2933
2934 /* By default, the value of NUM_THREADS is zero (selected at run time)
2935 and there is no conditional. */
2936 cond = NULL_TREE;
2937 val = build_int_cst (unsigned_type_node, 0);
2938
2939 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2940 if (c)
2941 cond = OMP_CLAUSE_IF_EXPR (c);
2942
2943 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2944 if (c)
2945 {
2946 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2947 clause_loc = OMP_CLAUSE_LOCATION (c);
2948 }
2949 else
2950 clause_loc = gimple_location (entry_stmt);
2951
2952 /* Ensure 'val' is of the correct type. */
2953 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2954
2955 /* If we found the clause 'if (cond)', build either
2956 (cond != 0) or (cond ? val : 1u). */
2957 if (cond)
2958 {
2959 gimple_stmt_iterator gsi;
2960
2961 cond = gimple_boolify (cond);
2962
2963 if (integer_zerop (val))
2964 val = fold_build2_loc (clause_loc,
2965 EQ_EXPR, unsigned_type_node, cond,
2966 build_int_cst (TREE_TYPE (cond), 0));
2967 else
2968 {
2969 basic_block cond_bb, then_bb, else_bb;
2970 edge e, e_then, e_else;
2971 tree tmp_then, tmp_else, tmp_join, tmp_var;
2972
2973 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2974 if (gimple_in_ssa_p (cfun))
2975 {
2976 tmp_then = make_ssa_name (tmp_var, NULL);
2977 tmp_else = make_ssa_name (tmp_var, NULL);
2978 tmp_join = make_ssa_name (tmp_var, NULL);
2979 }
2980 else
2981 {
2982 tmp_then = tmp_var;
2983 tmp_else = tmp_var;
2984 tmp_join = tmp_var;
2985 }
2986
2987 e = split_block (bb, NULL);
2988 cond_bb = e->src;
2989 bb = e->dest;
2990 remove_edge (e);
2991
2992 then_bb = create_empty_bb (cond_bb);
2993 else_bb = create_empty_bb (then_bb);
2994 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2995 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2996
2997 stmt = gimple_build_cond_empty (cond);
2998 gsi = gsi_start_bb (cond_bb);
2999 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3000
3001 gsi = gsi_start_bb (then_bb);
3002 stmt = gimple_build_assign (tmp_then, val);
3003 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3004
3005 gsi = gsi_start_bb (else_bb);
3006 stmt = gimple_build_assign
3007 (tmp_else, build_int_cst (unsigned_type_node, 1));
3008 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3009
3010 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3011 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3012 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3013 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3014
3015 if (gimple_in_ssa_p (cfun))
3016 {
3017 gimple phi = create_phi_node (tmp_join, bb);
3018 SSA_NAME_DEF_STMT (tmp_join) = phi;
3019 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3020 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3021 }
3022
3023 val = tmp_join;
3024 }
3025
3026 gsi = gsi_start_bb (bb);
3027 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3028 false, GSI_CONTINUE_LINKING);
3029 }
3030
3031 gsi = gsi_last_bb (bb);
3032 t = gimple_omp_parallel_data_arg (entry_stmt);
3033 if (t == NULL)
3034 t1 = null_pointer_node;
3035 else
3036 t1 = build_fold_addr_expr (t);
3037 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3038
3039 if (ws_args)
3040 {
3041 tree args = tree_cons (NULL, t2,
3042 tree_cons (NULL, t1,
3043 tree_cons (NULL, val, ws_args)));
3044 t = build_function_call_expr (UNKNOWN_LOCATION,
3045 built_in_decls[start_ix], args);
3046 }
3047 else
3048 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
3049
3050 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3051 false, GSI_CONTINUE_LINKING);
3052
3053 t = gimple_omp_parallel_data_arg (entry_stmt);
3054 if (t == NULL)
3055 t = null_pointer_node;
3056 else
3057 t = build_fold_addr_expr (t);
3058 t = build_call_expr_loc (gimple_location (entry_stmt),
3059 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3060 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3061 false, GSI_CONTINUE_LINKING);
3062
3063 t = build_call_expr_loc (gimple_location (entry_stmt),
3064 built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3065 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3066 false, GSI_CONTINUE_LINKING);
3067 }
3068
3069
3070 /* Build the function call to GOMP_task to actually
3071 generate the task operation. BB is the block where the code should be inserted. */
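/* The call emitted here has the shape

	GOMP_task (child_fn, &.omp_data_o, copy_fn, arg_size, arg_align,
		   if_clause_cond, untied ? 1 : 0);

   where copy_fn may be a null pointer and the condition defaults to true
   when no if clause is present. */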
3072
3073 static void
3074 expand_task_call (basic_block bb, gimple entry_stmt)
3075 {
3076 tree t, t1, t2, t3, flags, cond, c, clauses;
3077 gimple_stmt_iterator gsi;
3078 location_t loc = gimple_location (entry_stmt);
3079
3080 clauses = gimple_omp_task_clauses (entry_stmt);
3081
3082 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3083 if (c)
3084 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3085 else
3086 cond = boolean_true_node;
3087
3088 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3089 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3090
3091 gsi = gsi_last_bb (bb);
3092 t = gimple_omp_task_data_arg (entry_stmt);
3093 if (t == NULL)
3094 t2 = null_pointer_node;
3095 else
3096 t2 = build_fold_addr_expr_loc (loc, t);
3097 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3098 t = gimple_omp_task_copy_fn (entry_stmt);
3099 if (t == NULL)
3100 t3 = null_pointer_node;
3101 else
3102 t3 = build_fold_addr_expr_loc (loc, t);
3103
3104 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3105 gimple_omp_task_arg_size (entry_stmt),
3106 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3107
3108 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3109 false, GSI_CONTINUE_LINKING);
3110 }
3111
3112
3113 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3114 catch handler and return it. This prevents programs from violating the
3115 structured block semantics with throws. */
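/* Conceptually this rewrites BODY into

	try { BODY } catch (...) { <must-not-throw action> }

   where the action is the language's eh_protect_cleanup_actions hook if
   it is provided and __builtin_trap otherwise. */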
3116
3117 static gimple_seq
3118 maybe_catch_exception (gimple_seq body)
3119 {
3120 gimple g;
3121 tree decl;
3122
3123 if (!flag_exceptions)
3124 return body;
3125
3126 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3127 decl = lang_hooks.eh_protect_cleanup_actions ();
3128 else
3129 decl = built_in_decls[BUILT_IN_TRAP];
3130
3131 g = gimple_build_eh_must_not_throw (decl);
3132 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3133 GIMPLE_TRY_CATCH);
3134
3135 return gimple_seq_alloc_with_stmt (g);
3136 }
3137
3138 /* Chain all the DECLs in V by their TREE_CHAIN fields. */
3139
3140 static tree
3141 vec2chain (VEC(tree,gc) *v)
3142 {
3143 tree chain = NULL_TREE, t;
3144 unsigned ix;
3145
3146 FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
3147 {
3148 TREE_CHAIN (t) = chain;
3149 chain = t;
3150 }
3151
3152 return chain;
3153 }
3154
3155
3156 /* Remove barriers in REGION->EXIT's block. Note that this is only
3157 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3158 is an implicit barrier, a barrier that a workshare inside the
3159 GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL region
3160 can now be removed. */
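/* For instance (i, n and a are placeholders), in

	#pragma omp parallel
	{
	  #pragma omp for
	  for (i = 0; i < n; i++)
	    a[i] = i;
	}

   the barrier implied by the inner loop is immediately followed by the
   barrier at the end of the parallel, so the inner one can be marked
   nowait, subject to the task-related caveat handled below. */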
3161
3162 static void
3163 remove_exit_barrier (struct omp_region *region)
3164 {
3165 gimple_stmt_iterator gsi;
3166 basic_block exit_bb;
3167 edge_iterator ei;
3168 edge e;
3169 gimple stmt;
3170 int any_addressable_vars = -1;
3171
3172 exit_bb = region->exit;
3173
3174 /* If the parallel region doesn't return, we don't have REGION->EXIT
3175 block at all. */
3176 if (! exit_bb)
3177 return;
3178
3179 /* The last statement in the block will be the parallel's GIMPLE_OMP_RETURN. The
3180 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3181 statements that can appear in between are extremely limited -- no
3182 memory operations at all. Here, we allow nothing at all, so the
3183 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3184 gsi = gsi_last_bb (exit_bb);
3185 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3186 gsi_prev (&gsi);
3187 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3188 return;
3189
3190 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3191 {
3192 gsi = gsi_last_bb (e->src);
3193 if (gsi_end_p (gsi))
3194 continue;
3195 stmt = gsi_stmt (gsi);
3196 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3197 && !gimple_omp_return_nowait_p (stmt))
3198 {
3199 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3200 in many cases. If there could be tasks queued, the barrier
3201 might be needed to let the tasks run before some local
3202 variable of the parallel that the task uses as shared
3203 runs out of scope. The task can be spawned either
3204 from within the current function (this would be easy to check)
3205 or from some function it calls and gets passed an address
3206 of such a variable. */
3207 if (any_addressable_vars < 0)
3208 {
3209 gimple parallel_stmt = last_stmt (region->entry);
3210 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3211 tree local_decls, block, decl;
3212 unsigned ix;
3213
3214 any_addressable_vars = 0;
3215 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3216 if (TREE_ADDRESSABLE (decl))
3217 {
3218 any_addressable_vars = 1;
3219 break;
3220 }
3221 for (block = gimple_block (stmt);
3222 !any_addressable_vars
3223 && block
3224 && TREE_CODE (block) == BLOCK;
3225 block = BLOCK_SUPERCONTEXT (block))
3226 {
3227 for (local_decls = BLOCK_VARS (block);
3228 local_decls;
3229 local_decls = TREE_CHAIN (local_decls))
3230 if (TREE_ADDRESSABLE (local_decls))
3231 {
3232 any_addressable_vars = 1;
3233 break;
3234 }
3235 if (block == gimple_block (parallel_stmt))
3236 break;
3237 }
3238 }
3239 if (!any_addressable_vars)
3240 gimple_omp_return_set_nowait (stmt);
3241 }
3242 }
3243 }
3244
3245 static void
3246 remove_exit_barriers (struct omp_region *region)
3247 {
3248 if (region->type == GIMPLE_OMP_PARALLEL)
3249 remove_exit_barrier (region);
3250
3251 if (region->inner)
3252 {
3253 region = region->inner;
3254 remove_exit_barriers (region);
3255 while (region->next)
3256 {
3257 region = region->next;
3258 remove_exit_barriers (region);
3259 }
3260 }
3261 }
3262
3263 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3264 calls. These can't be declared as const functions, but
3265 within one parallel body they are constant, so they can be
3266 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3267 which are declared const. Similarly for a task body, except
3268 that in an untied task omp_get_thread_num () can change at any task
3269 scheduling point. */
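/* That is, inside an outlined parallel body a call such as

	tid = omp_get_thread_num ();

   is redirected to the const __builtin_omp_get_thread_num (), which lets
   later passes CSE repeated queries; untied tasks are excluded because
   the answer may change across task scheduling points. */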
3270
3271 static void
3272 optimize_omp_library_calls (gimple entry_stmt)
3273 {
3274 basic_block bb;
3275 gimple_stmt_iterator gsi;
3276 tree thr_num_id
3277 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3278 tree num_thr_id
3279 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3280 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3281 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3282 OMP_CLAUSE_UNTIED) != NULL);
3283
3284 FOR_EACH_BB (bb)
3285 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3286 {
3287 gimple call = gsi_stmt (gsi);
3288 tree decl;
3289
3290 if (is_gimple_call (call)
3291 && (decl = gimple_call_fndecl (call))
3292 && DECL_EXTERNAL (decl)
3293 && TREE_PUBLIC (decl)
3294 && DECL_INITIAL (decl) == NULL)
3295 {
3296 tree built_in;
3297
3298 if (DECL_NAME (decl) == thr_num_id)
3299 {
3300 /* In #pragma omp task untied omp_get_thread_num () can change
3301 during the execution of the task region. */
3302 if (untied_task)
3303 continue;
3304 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3305 }
3306 else if (DECL_NAME (decl) == num_thr_id)
3307 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3308 else
3309 continue;
3310
3311 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3312 || gimple_call_num_args (call) != 0)
3313 continue;
3314
3315 if (flag_exceptions && !TREE_NOTHROW (decl))
3316 continue;
3317
3318 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3319 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3320 TREE_TYPE (TREE_TYPE (built_in))))
3321 continue;
3322
3323 gimple_call_set_fndecl (call, built_in);
3324 }
3325 }
3326 }
3327
3328 /* Expand the OpenMP parallel or task directive starting at REGION. */
3329
3330 static void
3331 expand_omp_taskreg (struct omp_region *region)
3332 {
3333 basic_block entry_bb, exit_bb, new_bb;
3334 struct function *child_cfun;
3335 tree child_fn, block, t, ws_args;
3336 tree save_current;
3337 gimple_stmt_iterator gsi;
3338 gimple entry_stmt, stmt;
3339 edge e;
3340
3341 entry_stmt = last_stmt (region->entry);
3342 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3343 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3344 /* If this function has already been instrumented, make sure
3345 the child function isn't instrumented again. */
3346 child_cfun->after_tree_profile = cfun->after_tree_profile;
3347
3348 entry_bb = region->entry;
3349 exit_bb = region->exit;
3350
3351 if (is_combined_parallel (region))
3352 ws_args = region->ws_args;
3353 else
3354 ws_args = NULL_TREE;
3355
3356 if (child_cfun->cfg)
3357 {
3358 /* Due to inlining, it may happen that we have already outlined
3359 the region, in which case all we need to do is make the
3360 sub-graph unreachable and emit the parallel call. */
3361 edge entry_succ_e, exit_succ_e;
3362 gimple_stmt_iterator gsi;
3363
3364 entry_succ_e = single_succ_edge (entry_bb);
3365
3366 gsi = gsi_last_bb (entry_bb);
3367 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3368 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3369 gsi_remove (&gsi, true);
3370
3371 new_bb = entry_bb;
3372 if (exit_bb)
3373 {
3374 exit_succ_e = single_succ_edge (exit_bb);
3375 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3376 }
3377 remove_edge_and_dominated_blocks (entry_succ_e);
3378 }
3379 else
3380 {
3381 unsigned srcidx, dstidx, num;
3382
3383 /* If the parallel region needs data sent from the parent
3384 function, then the very first statement (except possible
3385 tree profile counter updates) of the parallel body
3386 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3387 &.OMP_DATA_O is passed as an argument to the child function,
3388 we need to replace it with the argument as seen by the child
3389 function.
3390
3391 In most cases, this will end up being the identity assignment
3392 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3393 a function call that has been inlined, the original PARM_DECL
3394 .OMP_DATA_I may have been converted into a different local
3395 variable, in which case we need to keep the assignment. */
3396 if (gimple_omp_taskreg_data_arg (entry_stmt))
3397 {
3398 basic_block entry_succ_bb = single_succ (entry_bb);
3399 gimple_stmt_iterator gsi;
3400 tree arg, narg;
3401 gimple parcopy_stmt = NULL;
3402
3403 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3404 {
3405 gimple stmt;
3406
3407 gcc_assert (!gsi_end_p (gsi));
3408 stmt = gsi_stmt (gsi);
3409 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3410 continue;
3411
3412 if (gimple_num_ops (stmt) == 2)
3413 {
3414 tree arg = gimple_assign_rhs1 (stmt);
3415
3416 /* We're ignoring the subcode because we're
3417 effectively doing a STRIP_NOPS. */
3418
3419 if (TREE_CODE (arg) == ADDR_EXPR
3420 && TREE_OPERAND (arg, 0)
3421 == gimple_omp_taskreg_data_arg (entry_stmt))
3422 {
3423 parcopy_stmt = stmt;
3424 break;
3425 }
3426 }
3427 }
3428
3429 gcc_assert (parcopy_stmt != NULL);
3430 arg = DECL_ARGUMENTS (child_fn);
3431
3432 if (!gimple_in_ssa_p (cfun))
3433 {
3434 if (gimple_assign_lhs (parcopy_stmt) == arg)
3435 gsi_remove (&gsi, true);
3436 else
3437 {
3438 /* ?? Is setting the subcode really necessary ?? */
3439 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3440 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3441 }
3442 }
3443 else
3444 {
3445 /* If we are in ssa form, we must load the value from the default
3446 definition of the argument. That should not be defined now,
3447 since the argument is not used uninitialized. */
3448 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3449 narg = make_ssa_name (arg, gimple_build_nop ());
3450 set_default_def (arg, narg);
3451 /* ?? Is setting the subcode really necessary ?? */
3452 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3453 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3454 update_stmt (parcopy_stmt);
3455 }
3456 }
3457
3458 /* Declare local variables needed in CHILD_CFUN. */
3459 block = DECL_INITIAL (child_fn);
3460 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3461 /* The gimplifier could record temporaries in parallel/task block
3462 rather than in containing function's local_decls chain,
3463 which would mean cgraph missed finalizing them. Do it now. */
3464 for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
3465 if (TREE_CODE (t) == VAR_DECL
3466 && TREE_STATIC (t)
3467 && !DECL_EXTERNAL (t))
3468 varpool_finalize_decl (t);
3469 DECL_SAVED_TREE (child_fn) = NULL;
3470 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3471 TREE_USED (block) = 1;
3472
3473 /* Reset DECL_CONTEXT on function arguments. */
3474 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3475 DECL_CONTEXT (t) = child_fn;
3476
3477 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3478 so that it can be moved to the child function. */
3479 gsi = gsi_last_bb (entry_bb);
3480 stmt = gsi_stmt (gsi);
3481 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3482 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3483 gsi_remove (&gsi, true);
3484 e = split_block (entry_bb, stmt);
3485 entry_bb = e->dest;
3486 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3487
3488 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3489 if (exit_bb)
3490 {
3491 gsi = gsi_last_bb (exit_bb);
3492 gcc_assert (!gsi_end_p (gsi)
3493 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3494 stmt = gimple_build_return (NULL);
3495 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3496 gsi_remove (&gsi, true);
3497 }
3498
3499 /* Move the parallel region into CHILD_CFUN. */
3500
3501 if (gimple_in_ssa_p (cfun))
3502 {
3503 push_cfun (child_cfun);
3504 init_tree_ssa (child_cfun);
3505 init_ssa_operands ();
3506 cfun->gimple_df->in_ssa_p = true;
3507 pop_cfun ();
3508 block = NULL_TREE;
3509 }
3510 else
3511 block = gimple_block (entry_stmt);
3512
3513 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3514 if (exit_bb)
3515 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3516
3517 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3518 num = VEC_length (tree, child_cfun->local_decls);
3519 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3520 {
3521 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3522 if (DECL_CONTEXT (t) == cfun->decl)
3523 continue;
3524 if (srcidx != dstidx)
3525 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3526 dstidx++;
3527 }
3528 if (dstidx != num)
3529 VEC_truncate (tree, child_cfun->local_decls, dstidx);
3530
3531 /* Inform the callgraph about the new function. */
3532 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3533 = cfun->curr_properties;
3534 cgraph_add_new_function (child_fn, true);
3535
3536 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3537 fixed in a following pass. */
3538 push_cfun (child_cfun);
3539 save_current = current_function_decl;
3540 current_function_decl = child_fn;
3541 if (optimize)
3542 optimize_omp_library_calls (entry_stmt);
3543 rebuild_cgraph_edges ();
3544
3545 /* Some EH regions might become dead, see PR34608. If
3546 pass_cleanup_cfg isn't the first pass to happen with the
3547 new child, these dead EH edges might cause problems.
3548 Clean them up now. */
3549 if (flag_exceptions)
3550 {
3551 basic_block bb;
3552 bool changed = false;
3553
3554 FOR_EACH_BB (bb)
3555 changed |= gimple_purge_dead_eh_edges (bb);
3556 if (changed)
3557 cleanup_tree_cfg ();
3558 }
3559 if (gimple_in_ssa_p (cfun))
3560 update_ssa (TODO_update_ssa);
3561 current_function_decl = save_current;
3562 pop_cfun ();
3563 }
3564
3565 /* Emit a library call to launch the children threads. */
3566 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3567 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3568 else
3569 expand_task_call (new_bb, entry_stmt);
3570 update_ssa (TODO_update_ssa_only_virtuals);
3571 }
3572
3573
3574 /* A subroutine of expand_omp_for. Generate code for a parallel
3575 loop with any schedule. Given parameters:
3576
3577 for (V = N1; V cond N2; V += STEP) BODY;
3578
3579 where COND is "<" or ">", we generate pseudocode
3580
3581 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3582 if (more) goto L0; else goto L3;
3583 L0:
3584 V = istart0;
3585 iend = iend0;
3586 L1:
3587 BODY;
3588 V += STEP;
3589 if (V cond iend) goto L1; else goto L2;
3590 L2:
3591 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3592 L3:
3593
3594 If this is a combined omp parallel loop, instead of the call to
3595 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3596
3597 For collapsed loops, given parameters:
3598 collapse(3)
3599 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3600 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3601 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3602 BODY;
3603
3604 we generate pseudocode
3605
3606 if (cond3 is <)
3607 adj = STEP3 - 1;
3608 else
3609 adj = STEP3 + 1;
3610 count3 = (adj + N32 - N31) / STEP3;
3611 if (cond2 is <)
3612 adj = STEP2 - 1;
3613 else
3614 adj = STEP2 + 1;
3615 count2 = (adj + N22 - N21) / STEP2;
3616 if (cond1 is <)
3617 adj = STEP1 - 1;
3618 else
3619 adj = STEP1 + 1;
3620 count1 = (adj + N12 - N11) / STEP1;
3621 count = count1 * count2 * count3;
3622 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3623 if (more) goto L0; else goto L3;
3624 L0:
3625 V = istart0;
3626 T = V;
3627 V3 = N31 + (T % count3) * STEP3;
3628 T = T / count3;
3629 V2 = N21 + (T % count2) * STEP2;
3630 T = T / count2;
3631 V1 = N11 + T * STEP1;
3632 iend = iend0;
3633 L1:
3634 BODY;
3635 V += 1;
3636 if (V < iend) goto L10; else goto L2;
3637 L10:
3638 V3 += STEP3;
3639 if (V3 cond3 N32) goto L1; else goto L11;
3640 L11:
3641 V3 = N31;
3642 V2 += STEP2;
3643 if (V2 cond2 N22) goto L1; else goto L12;
3644 L12:
3645 V2 = N21;
3646 V1 += STEP1;
3647 goto L1;
3648 L2:
3649 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3650 L3:
3651
3652 */
3653
3654 static void
3655 expand_omp_for_generic (struct omp_region *region,
3656 struct omp_for_data *fd,
3657 enum built_in_function start_fn,
3658 enum built_in_function next_fn)
3659 {
3660 tree type, istart0, iend0, iend;
3661 tree t, vmain, vback, bias = NULL_TREE;
3662 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3663 basic_block l2_bb = NULL, l3_bb = NULL;
3664 gimple_stmt_iterator gsi;
3665 gimple stmt;
3666 bool in_combined_parallel = is_combined_parallel (region);
3667 bool broken_loop = region->cont == NULL;
3668 edge e, ne;
3669 tree *counts = NULL;
3670 int i;
3671
3672 gcc_assert (!broken_loop || !in_combined_parallel);
3673 gcc_assert (fd->iter_type == long_integer_type_node
3674 || !in_combined_parallel);
3675
3676 type = TREE_TYPE (fd->loop.v);
3677 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3678 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3679 TREE_ADDRESSABLE (istart0) = 1;
3680 TREE_ADDRESSABLE (iend0) = 1;
3681 if (gimple_in_ssa_p (cfun))
3682 {
3683 add_referenced_var (istart0);
3684 add_referenced_var (iend0);
3685 }
3686
3687 /* See if we need to bias by LLONG_MIN. */
3688 if (fd->iter_type == long_long_unsigned_type_node
3689 && TREE_CODE (type) == INTEGER_TYPE
3690 && !TYPE_UNSIGNED (type))
3691 {
3692 tree n1, n2;
3693
3694 if (fd->loop.cond_code == LT_EXPR)
3695 {
3696 n1 = fd->loop.n1;
3697 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3698 }
3699 else
3700 {
3701 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3702 n2 = fd->loop.n1;
3703 }
3704 if (TREE_CODE (n1) != INTEGER_CST
3705 || TREE_CODE (n2) != INTEGER_CST
3706 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3707 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3708 }
3709
3710 entry_bb = region->entry;
3711 cont_bb = region->cont;
3712 collapse_bb = NULL;
3713 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3714 gcc_assert (broken_loop
3715 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3716 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3717 l1_bb = single_succ (l0_bb);
3718 if (!broken_loop)
3719 {
3720 l2_bb = create_empty_bb (cont_bb);
3721 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3722 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3723 }
3724 else
3725 l2_bb = NULL;
3726 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3727 exit_bb = region->exit;
3728
3729 gsi = gsi_last_bb (entry_bb);
3730
3731 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3732 if (fd->collapse > 1)
3733 {
3734 /* Collapsed loops need work for expansion in SSA form. */
3735 gcc_assert (!gimple_in_ssa_p (cfun));
3736 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3737 for (i = 0; i < fd->collapse; i++)
3738 {
3739 tree itype = TREE_TYPE (fd->loops[i].v);
3740
3741 if (POINTER_TYPE_P (itype))
3742 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3743 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3744 ? -1 : 1));
3745 t = fold_build2 (PLUS_EXPR, itype,
3746 fold_convert (itype, fd->loops[i].step), t);
3747 t = fold_build2 (PLUS_EXPR, itype, t,
3748 fold_convert (itype, fd->loops[i].n2));
3749 t = fold_build2 (MINUS_EXPR, itype, t,
3750 fold_convert (itype, fd->loops[i].n1));
3751 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3752 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3753 fold_build1 (NEGATE_EXPR, itype, t),
3754 fold_build1 (NEGATE_EXPR, itype,
3755 fold_convert (itype,
3756 fd->loops[i].step)));
3757 else
3758 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3759 fold_convert (itype, fd->loops[i].step));
3760 t = fold_convert (type, t);
3761 if (TREE_CODE (t) == INTEGER_CST)
3762 counts[i] = t;
3763 else
3764 {
3765 counts[i] = create_tmp_var (type, ".count");
3766 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3767 true, GSI_SAME_STMT);
3768 stmt = gimple_build_assign (counts[i], t);
3769 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3770 }
3771 if (SSA_VAR_P (fd->loop.n2))
3772 {
3773 if (i == 0)
3774 t = counts[0];
3775 else
3776 {
3777 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3778 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3779 true, GSI_SAME_STMT);
3780 }
3781 stmt = gimple_build_assign (fd->loop.n2, t);
3782 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3783 }
3784 }
3785 }
3786 if (in_combined_parallel)
3787 {
3788 /* In a combined parallel loop, emit a call to
3789 GOMP_loop_foo_next. */
3790 t = build_call_expr (built_in_decls[next_fn], 2,
3791 build_fold_addr_expr (istart0),
3792 build_fold_addr_expr (iend0));
3793 }
3794 else
3795 {
3796 tree t0, t1, t2, t3, t4;
3797 /* If this is not a combined parallel loop, emit a call to
3798 GOMP_loop_foo_start in ENTRY_BB. */
3799 t4 = build_fold_addr_expr (iend0);
3800 t3 = build_fold_addr_expr (istart0);
3801 t2 = fold_convert (fd->iter_type, fd->loop.step);
3802 if (POINTER_TYPE_P (type)
3803 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3804 {
3805 	  /* Avoid casting pointers to an integer of a different size.  */
3806 tree itype
3807 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3808 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3809 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3810 }
3811 else
3812 {
3813 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3814 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3815 }
3816 if (bias)
3817 {
3818 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3819 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3820 }
3821 if (fd->iter_type == long_integer_type_node)
3822 {
3823 if (fd->chunk_size)
3824 {
3825 t = fold_convert (fd->iter_type, fd->chunk_size);
3826 t = build_call_expr (built_in_decls[start_fn], 6,
3827 t0, t1, t2, t, t3, t4);
3828 }
3829 else
3830 t = build_call_expr (built_in_decls[start_fn], 5,
3831 t0, t1, t2, t3, t4);
3832 }
3833 else
3834 {
3835 tree t5;
3836 tree c_bool_type;
3837
3838 	  /* The GOMP_loop_ull_*start functions have an additional boolean
3839 argument, true for < loops and false for > loops.
3840 In Fortran, the C bool type can be different from
3841 boolean_type_node. */
3842 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3843 t5 = build_int_cst (c_bool_type,
3844 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3845 if (fd->chunk_size)
3846 {
3847 t = fold_convert (fd->iter_type, fd->chunk_size);
3848 t = build_call_expr (built_in_decls[start_fn], 7,
3849 t5, t0, t1, t2, t, t3, t4);
3850 }
3851 else
3852 t = build_call_expr (built_in_decls[start_fn], 6,
3853 t5, t0, t1, t2, t3, t4);
3854 }
3855 }
3856 if (TREE_TYPE (t) != boolean_type_node)
3857 t = fold_build2 (NE_EXPR, boolean_type_node,
3858 t, build_int_cst (TREE_TYPE (t), 0));
3859 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3860 true, GSI_SAME_STMT);
3861 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3862
3863 /* Remove the GIMPLE_OMP_FOR statement. */
3864 gsi_remove (&gsi, true);
3865
3866 /* Iteration setup for sequential loop goes in L0_BB. */
3867 gsi = gsi_start_bb (l0_bb);
3868 t = istart0;
3869 if (bias)
3870 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3871 if (POINTER_TYPE_P (type))
3872 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3873 0), t);
3874 t = fold_convert (type, t);
3875 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3876 false, GSI_CONTINUE_LINKING);
3877 stmt = gimple_build_assign (fd->loop.v, t);
3878 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3879
3880 t = iend0;
3881 if (bias)
3882 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3883 if (POINTER_TYPE_P (type))
3884 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3885 0), t);
3886 t = fold_convert (type, t);
3887 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3888 false, GSI_CONTINUE_LINKING);
3889 if (fd->collapse > 1)
3890 {
3891 tree tem = create_tmp_var (type, ".tem");
3892
3893 stmt = gimple_build_assign (tem, fd->loop.v);
3894 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3895 for (i = fd->collapse - 1; i >= 0; i--)
3896 {
3897 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3898 itype = vtype;
3899 if (POINTER_TYPE_P (vtype))
3900 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3901 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3902 t = fold_convert (itype, t);
3903 t = fold_build2 (MULT_EXPR, itype, t,
3904 fold_convert (itype, fd->loops[i].step));
3905 if (POINTER_TYPE_P (vtype))
3906 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3907 fd->loops[i].n1, fold_convert (sizetype, t));
3908 else
3909 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3910 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3911 false, GSI_CONTINUE_LINKING);
3912 stmt = gimple_build_assign (fd->loops[i].v, t);
3913 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3914 if (i != 0)
3915 {
3916 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3917 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3918 false, GSI_CONTINUE_LINKING);
3919 stmt = gimple_build_assign (tem, t);
3920 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3921 }
3922 }
3923 }
3924
3925 if (!broken_loop)
3926 {
3927 /* Code to control the increment and predicate for the sequential
3928 loop goes in the CONT_BB. */
3929 gsi = gsi_last_bb (cont_bb);
3930 stmt = gsi_stmt (gsi);
3931 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3932 vmain = gimple_omp_continue_control_use (stmt);
3933 vback = gimple_omp_continue_control_def (stmt);
3934
3935 if (POINTER_TYPE_P (type))
3936 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3937 fold_convert (sizetype, fd->loop.step));
3938 else
3939 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3940 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3941 true, GSI_SAME_STMT);
3942 stmt = gimple_build_assign (vback, t);
3943 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3944
3945 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3946 stmt = gimple_build_cond_empty (t);
3947 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3948
3949 /* Remove GIMPLE_OMP_CONTINUE. */
3950 gsi_remove (&gsi, true);
3951
3952 if (fd->collapse > 1)
3953 {
3954 basic_block last_bb, bb;
3955
3956 last_bb = cont_bb;
3957 for (i = fd->collapse - 1; i >= 0; i--)
3958 {
3959 tree vtype = TREE_TYPE (fd->loops[i].v);
3960
3961 bb = create_empty_bb (last_bb);
3962 gsi = gsi_start_bb (bb);
3963
3964 if (i < fd->collapse - 1)
3965 {
3966 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3967 e->probability = REG_BR_PROB_BASE / 8;
3968
3969 t = fd->loops[i + 1].n1;
3970 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3971 false, GSI_CONTINUE_LINKING);
3972 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3973 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3974 }
3975 else
3976 collapse_bb = bb;
3977
3978 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3979
3980 if (POINTER_TYPE_P (vtype))
3981 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3982 fd->loops[i].v,
3983 fold_convert (sizetype, fd->loops[i].step));
3984 else
3985 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3986 fd->loops[i].step);
3987 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3988 false, GSI_CONTINUE_LINKING);
3989 stmt = gimple_build_assign (fd->loops[i].v, t);
3990 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3991
3992 if (i > 0)
3993 {
3994 t = fd->loops[i].n2;
3995 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3996 false, GSI_CONTINUE_LINKING);
3997 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
3998 fd->loops[i].v, t);
3999 stmt = gimple_build_cond_empty (t);
4000 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4001 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4002 e->probability = REG_BR_PROB_BASE * 7 / 8;
4003 }
4004 else
4005 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4006 last_bb = bb;
4007 }
4008 }
4009
4010 /* Emit code to get the next parallel iteration in L2_BB. */
4011 gsi = gsi_start_bb (l2_bb);
4012
4013 t = build_call_expr (built_in_decls[next_fn], 2,
4014 build_fold_addr_expr (istart0),
4015 build_fold_addr_expr (iend0));
4016 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4017 false, GSI_CONTINUE_LINKING);
4018 if (TREE_TYPE (t) != boolean_type_node)
4019 t = fold_build2 (NE_EXPR, boolean_type_node,
4020 t, build_int_cst (TREE_TYPE (t), 0));
4021 stmt = gimple_build_cond_empty (t);
4022 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4023 }
4024
4025 /* Add the loop cleanup function. */
4026 gsi = gsi_last_bb (exit_bb);
4027 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4028 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
4029 else
4030 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
4031 stmt = gimple_build_call (t, 0);
4032 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4033 gsi_remove (&gsi, true);
4034
4035 /* Connect the new blocks. */
4036 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4037 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4038
4039 if (!broken_loop)
4040 {
4041 gimple_seq phis;
4042
4043 e = find_edge (cont_bb, l3_bb);
4044 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4045
4046 phis = phi_nodes (l3_bb);
4047 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4048 {
4049 gimple phi = gsi_stmt (gsi);
4050 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4051 PHI_ARG_DEF_FROM_EDGE (phi, e));
4052 }
4053 remove_edge (e);
4054
4055 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4056 if (fd->collapse > 1)
4057 {
4058 e = find_edge (cont_bb, l1_bb);
4059 remove_edge (e);
4060 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4061 }
4062 else
4063 {
4064 e = find_edge (cont_bb, l1_bb);
4065 e->flags = EDGE_TRUE_VALUE;
4066 }
4067 e->probability = REG_BR_PROB_BASE * 7 / 8;
4068 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4069 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4070
4071 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4072 recompute_dominator (CDI_DOMINATORS, l2_bb));
4073 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4074 recompute_dominator (CDI_DOMINATORS, l3_bb));
4075 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4076 recompute_dominator (CDI_DOMINATORS, l0_bb));
4077 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4078 recompute_dominator (CDI_DOMINATORS, l1_bb));
4079 }
4080 }
4081
4082
4083 /* A subroutine of expand_omp_for. Generate code for a parallel
4084 loop with static schedule and no specified chunk size. Given
4085 parameters:
4086
4087 for (V = N1; V cond N2; V += STEP) BODY;
4088
4089 where COND is "<" or ">", we generate pseudocode
4090
4091 if (cond is <)
4092 adj = STEP - 1;
4093 else
4094 adj = STEP + 1;
4095 if ((__typeof (V)) -1 > 0 && cond is >)
4096 n = -(adj + N2 - N1) / -STEP;
4097 else
4098 n = (adj + N2 - N1) / STEP;
4099 q = n / nthreads;
4100 q += (q * nthreads != n);
4101 s0 = q * threadid;
4102 e0 = min(s0 + q, n);
4103 V = s0 * STEP + N1;
4104 if (s0 >= e0) goto L2; else goto L0;
4105 L0:
4106 e = e0 * STEP + N1;
4107 L1:
4108 BODY;
4109 V += STEP;
4110 if (V cond e) goto L1;
4111 L2:
4112 */
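/* A worked example of the partitioning above (illustrative only):  with
   n = 10 iterations and nthreads = 4, q = 10/4 = 2 and, because
   2 * 4 != 10, q is bumped to 3.  Thread 0 then executes iterations
   [0,3), thread 1 [3,6), thread 2 [6,9) and thread 3 [9,10) (e0 is
   clamped to n); a thread whose s0 >= e0 skips the body entirely via
   the branch to L2.  */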
4113
4114 static void
4115 expand_omp_for_static_nochunk (struct omp_region *region,
4116 struct omp_for_data *fd)
4117 {
4118 tree n, q, s0, e0, e, t, nthreads, threadid;
4119 tree type, itype, vmain, vback;
4120 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4121 basic_block fin_bb;
4122 gimple_stmt_iterator gsi;
4123 gimple stmt;
4124
4125 itype = type = TREE_TYPE (fd->loop.v);
4126 if (POINTER_TYPE_P (type))
4127 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4128
4129 entry_bb = region->entry;
4130 cont_bb = region->cont;
4131 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4132 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4133 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4134 body_bb = single_succ (seq_start_bb);
4135 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4136 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4137 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4138 exit_bb = region->exit;
4139
4140 /* Iteration space partitioning goes in ENTRY_BB. */
4141 gsi = gsi_last_bb (entry_bb);
4142 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4143
4144 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4145 t = fold_convert (itype, t);
4146 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4147 true, GSI_SAME_STMT);
4148
4149 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4150 t = fold_convert (itype, t);
4151 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4152 true, GSI_SAME_STMT);
4153
4154 fd->loop.n1
4155 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4156 true, NULL_TREE, true, GSI_SAME_STMT);
4157 fd->loop.n2
4158 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4159 true, NULL_TREE, true, GSI_SAME_STMT);
4160 fd->loop.step
4161 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4162 true, NULL_TREE, true, GSI_SAME_STMT);
4163
4164 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4165 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4166 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4167 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4168 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4169 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4170 fold_build1 (NEGATE_EXPR, itype, t),
4171 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4172 else
4173 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4174 t = fold_convert (itype, t);
4175 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4176
4177 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4178 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4179
4180 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4181 t = fold_build2 (NE_EXPR, itype, t, n);
4182 t = fold_build2 (PLUS_EXPR, itype, q, t);
4183 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4184
4185 t = build2 (MULT_EXPR, itype, q, threadid);
4186 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4187
4188 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4189 t = fold_build2 (MIN_EXPR, itype, t, n);
4190 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4191
4192 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4193 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4194
4195 /* Remove the GIMPLE_OMP_FOR statement. */
4196 gsi_remove (&gsi, true);
4197
4198 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4199 gsi = gsi_start_bb (seq_start_bb);
4200
4201 t = fold_convert (itype, s0);
4202 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4203 if (POINTER_TYPE_P (type))
4204 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4205 fold_convert (sizetype, t));
4206 else
4207 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4208 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4209 false, GSI_CONTINUE_LINKING);
4210 stmt = gimple_build_assign (fd->loop.v, t);
4211 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4212
4213 t = fold_convert (itype, e0);
4214 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4215 if (POINTER_TYPE_P (type))
4216 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4217 fold_convert (sizetype, t));
4218 else
4219 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4220 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4221 false, GSI_CONTINUE_LINKING);
4222
4223 /* The code controlling the sequential loop replaces the
4224 GIMPLE_OMP_CONTINUE. */
4225 gsi = gsi_last_bb (cont_bb);
4226 stmt = gsi_stmt (gsi);
4227 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4228 vmain = gimple_omp_continue_control_use (stmt);
4229 vback = gimple_omp_continue_control_def (stmt);
4230
4231 if (POINTER_TYPE_P (type))
4232 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4233 fold_convert (sizetype, fd->loop.step));
4234 else
4235 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4236 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4237 true, GSI_SAME_STMT);
4238 stmt = gimple_build_assign (vback, t);
4239 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4240
4241 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4242 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4243
4244 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4245 gsi_remove (&gsi, true);
4246
4247 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4248 gsi = gsi_last_bb (exit_bb);
4249 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4250 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4251 false, GSI_SAME_STMT);
4252 gsi_remove (&gsi, true);
4253
4254 /* Connect all the blocks. */
4255 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4256 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4257
4258 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4259 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4260
4261 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4262 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4263 recompute_dominator (CDI_DOMINATORS, body_bb));
4264 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4265 recompute_dominator (CDI_DOMINATORS, fin_bb));
4266 }
4267
4268
4269 /* A subroutine of expand_omp_for. Generate code for a parallel
4270 loop with static schedule and a specified chunk size. Given
4271 parameters:
4272
4273 for (V = N1; V cond N2; V += STEP) BODY;
4274
4275 where COND is "<" or ">", we generate pseudocode
4276
4277 if (cond is <)
4278 adj = STEP - 1;
4279 else
4280 adj = STEP + 1;
4281 if ((__typeof (V)) -1 > 0 && cond is >)
4282 n = -(adj + N2 - N1) / -STEP;
4283 else
4284 n = (adj + N2 - N1) / STEP;
4285 trip = 0;
4286 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4287 here so that V is defined
4288 if the loop is not entered
4289 L0:
4290 s0 = (trip * nthreads + threadid) * CHUNK;
4291 e0 = min(s0 + CHUNK, n);
4292 if (s0 < n) goto L1; else goto L4;
4293 L1:
4294 V = s0 * STEP + N1;
4295 e = e0 * STEP + N1;
4296 L2:
4297 BODY;
4298 V += STEP;
4299 if (V cond e) goto L2; else goto L3;
4300 L3:
4301 trip += 1;
4302 goto L0;
4303 L4:
4304 */
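/* A worked example of the trip loop above (illustrative only):  with
   n = 10, nthreads = 2 and CHUNK = 3, trip 0 hands thread 0 the
   iterations [0,3) and thread 1 [3,6); trip 1 hands thread 0 [6,9) and
   thread 1 [9,10); on trip 2 both threads compute s0 >= n and branch
   to L4.  Chunks are thus dealt out round-robin without any runtime
   calls, in contrast to schedule(dynamic).  */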
4305
4306 static void
4307 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4308 {
4309 tree n, s0, e0, e, t;
4310 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4311 tree type, itype, v_main, v_back, v_extra;
4312 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4313 basic_block trip_update_bb, cont_bb, fin_bb;
4314 gimple_stmt_iterator si;
4315 gimple stmt;
4316 edge se;
4317
4318 itype = type = TREE_TYPE (fd->loop.v);
4319 if (POINTER_TYPE_P (type))
4320 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4321
4322 entry_bb = region->entry;
4323 se = split_block (entry_bb, last_stmt (entry_bb));
4324 entry_bb = se->src;
4325 iter_part_bb = se->dest;
4326 cont_bb = region->cont;
4327 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4328 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4329 == FALLTHRU_EDGE (cont_bb)->dest);
4330 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4331 body_bb = single_succ (seq_start_bb);
4332 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4333 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4334 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4335 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4336 exit_bb = region->exit;
4337
4338 /* Trip and adjustment setup goes in ENTRY_BB. */
4339 si = gsi_last_bb (entry_bb);
4340 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4341
4342 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4343 t = fold_convert (itype, t);
4344 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4345 true, GSI_SAME_STMT);
4346
4347 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4348 t = fold_convert (itype, t);
4349 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4350 true, GSI_SAME_STMT);
4351
4352 fd->loop.n1
4353 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4354 true, NULL_TREE, true, GSI_SAME_STMT);
4355 fd->loop.n2
4356 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4357 true, NULL_TREE, true, GSI_SAME_STMT);
4358 fd->loop.step
4359 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4360 true, NULL_TREE, true, GSI_SAME_STMT);
4361 fd->chunk_size
4362 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4363 true, NULL_TREE, true, GSI_SAME_STMT);
4364
4365 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4366 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4367 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4368 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4369 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4370 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4371 fold_build1 (NEGATE_EXPR, itype, t),
4372 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4373 else
4374 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4375 t = fold_convert (itype, t);
4376 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4377 true, GSI_SAME_STMT);
4378
4379 trip_var = create_tmp_var (itype, ".trip");
4380 if (gimple_in_ssa_p (cfun))
4381 {
4382 add_referenced_var (trip_var);
4383 trip_init = make_ssa_name (trip_var, NULL);
4384 trip_main = make_ssa_name (trip_var, NULL);
4385 trip_back = make_ssa_name (trip_var, NULL);
4386 }
4387 else
4388 {
4389 trip_init = trip_var;
4390 trip_main = trip_var;
4391 trip_back = trip_var;
4392 }
4393
4394 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4395 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4396
4397 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4398 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4399 if (POINTER_TYPE_P (type))
4400 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4401 fold_convert (sizetype, t));
4402 else
4403 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4404 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4405 true, GSI_SAME_STMT);
4406
4407 /* Remove the GIMPLE_OMP_FOR. */
4408 gsi_remove (&si, true);
4409
4410 /* Iteration space partitioning goes in ITER_PART_BB. */
4411 si = gsi_last_bb (iter_part_bb);
4412
4413 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4414 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4415 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4416 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4417 false, GSI_CONTINUE_LINKING);
4418
4419 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4420 t = fold_build2 (MIN_EXPR, itype, t, n);
4421 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4422 false, GSI_CONTINUE_LINKING);
4423
4424 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4425 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4426
4427 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4428 si = gsi_start_bb (seq_start_bb);
4429
4430 t = fold_convert (itype, s0);
4431 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4432 if (POINTER_TYPE_P (type))
4433 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4434 fold_convert (sizetype, t));
4435 else
4436 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4437 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4438 false, GSI_CONTINUE_LINKING);
4439 stmt = gimple_build_assign (fd->loop.v, t);
4440 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4441
4442 t = fold_convert (itype, e0);
4443 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4444 if (POINTER_TYPE_P (type))
4445 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4446 fold_convert (sizetype, t));
4447 else
4448 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4449 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4450 false, GSI_CONTINUE_LINKING);
4451
4452 /* The code controlling the sequential loop goes in CONT_BB,
4453 replacing the GIMPLE_OMP_CONTINUE. */
4454 si = gsi_last_bb (cont_bb);
4455 stmt = gsi_stmt (si);
4456 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4457 v_main = gimple_omp_continue_control_use (stmt);
4458 v_back = gimple_omp_continue_control_def (stmt);
4459
4460 if (POINTER_TYPE_P (type))
4461 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4462 fold_convert (sizetype, fd->loop.step));
4463 else
4464 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4465 stmt = gimple_build_assign (v_back, t);
4466 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4467
4468 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4469 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4470
4471 /* Remove GIMPLE_OMP_CONTINUE. */
4472 gsi_remove (&si, true);
4473
4474 /* Trip update code goes into TRIP_UPDATE_BB. */
4475 si = gsi_start_bb (trip_update_bb);
4476
4477 t = build_int_cst (itype, 1);
4478 t = build2 (PLUS_EXPR, itype, trip_main, t);
4479 stmt = gimple_build_assign (trip_back, t);
4480 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4481
4482 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4483 si = gsi_last_bb (exit_bb);
4484 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4485 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4486 false, GSI_SAME_STMT);
4487 gsi_remove (&si, true);
4488
4489 /* Connect the new blocks. */
4490 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4491 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4492
4493 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4494 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4495
4496 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4497
4498 if (gimple_in_ssa_p (cfun))
4499 {
4500 gimple_stmt_iterator psi;
4501 gimple phi;
4502 edge re, ene;
4503 edge_var_map_vector head;
4504 edge_var_map *vm;
4505 size_t i;
4506
4507 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4508 remove arguments of the phi nodes in fin_bb. We need to create
4509 appropriate phi nodes in iter_part_bb instead. */
4510 se = single_pred_edge (fin_bb);
4511 re = single_succ_edge (trip_update_bb);
4512 head = redirect_edge_var_map_vector (re);
4513 ene = single_succ_edge (entry_bb);
4514
4515 psi = gsi_start_phis (fin_bb);
4516 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4517 gsi_next (&psi), ++i)
4518 {
4519 gimple nphi;
4520 source_location locus;
4521
4522 phi = gsi_stmt (psi);
4523 t = gimple_phi_result (phi);
4524 gcc_assert (t == redirect_edge_var_map_result (vm));
4525 nphi = create_phi_node (t, iter_part_bb);
4526 SSA_NAME_DEF_STMT (t) = nphi;
4527
4528 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4529 locus = gimple_phi_arg_location_from_edge (phi, se);
4530
4531 /* A special case -- fd->loop.v is not yet computed in
4532 	     iter_part_bb; we need to use v_extra instead.  */
4533 if (t == fd->loop.v)
4534 t = v_extra;
4535 add_phi_arg (nphi, t, ene, locus);
4536 locus = redirect_edge_var_map_location (vm);
4537 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4538 }
4539 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4540 redirect_edge_var_map_clear (re);
4541 while (1)
4542 {
4543 psi = gsi_start_phis (fin_bb);
4544 if (gsi_end_p (psi))
4545 break;
4546 remove_phi_node (&psi, false);
4547 }
4548
4549 /* Make phi node for trip. */
4550 phi = create_phi_node (trip_main, iter_part_bb);
4551 SSA_NAME_DEF_STMT (trip_main) = phi;
4552 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4553 UNKNOWN_LOCATION);
4554 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4555 UNKNOWN_LOCATION);
4556 }
4557
4558 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4559 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4560 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4561 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4562 recompute_dominator (CDI_DOMINATORS, fin_bb));
4563 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4564 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4565 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4566 recompute_dominator (CDI_DOMINATORS, body_bb));
4567 }
4568
4569
4570 /* Expand the OpenMP loop defined by REGION. */
4571
4572 static void
4573 expand_omp_for (struct omp_region *region)
4574 {
4575 struct omp_for_data fd;
4576 struct omp_for_data_loop *loops;
4577
4578 loops
4579 = (struct omp_for_data_loop *)
4580 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4581 * sizeof (struct omp_for_data_loop));
4582 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4583 region->sched_kind = fd.sched_kind;
4584
4585 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4586 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4587 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4588 if (region->cont)
4589 {
4590 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4591 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4592 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4593 }
4594
4595 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4596 && !fd.have_ordered
4597 && fd.collapse == 1
4598 && region->cont != NULL)
4599 {
4600 if (fd.chunk_size == NULL)
4601 expand_omp_for_static_nochunk (region, &fd);
4602 else
4603 expand_omp_for_static_chunk (region, &fd);
4604 }
4605 else
4606 {
4607 int fn_index, start_ix, next_ix;
4608
4609 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4610 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4611 ? 3 : fd.sched_kind;
4612 fn_index += fd.have_ordered * 4;
4613 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4614 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4615 if (fd.iter_type == long_long_unsigned_type_node)
4616 {
4617 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4618 - BUILT_IN_GOMP_LOOP_STATIC_START;
4619 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4620 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4621 }
4622 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4623 (enum built_in_function) next_ix);
4624 }
4625
4626 update_ssa (TODO_update_ssa_only_virtuals);
4627 }
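/* For example (assuming the usual grouping of the loop builtins in
   omp-builtins.def, on which the index arithmetic above relies), an
   illustrative

       #pragma omp for schedule(guided) ordered
       for (i = 0; i < n; i++) ...

   has sched_kind GUIDED (fn_index 2) plus have_ordered (+ 4), so the
   generic expander receives GOMP_loop_ordered_guided_start and
   GOMP_loop_ordered_guided_next; plain schedule(runtime) maps to
   GOMP_loop_runtime_start / GOMP_loop_runtime_next, and loops with an
   unsigned long long iteration type use the _ull_ variants.  */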
4628
4629
4630 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4631
4632 v = GOMP_sections_start (n);
4633 L0:
4634 switch (v)
4635 {
4636 case 0:
4637 goto L2;
4638 case 1:
4639 section 1;
4640 goto L1;
4641 case 2:
4642 ...
4643 case n:
4644 ...
4645 default:
4646 abort ();
4647 }
4648 L1:
4649 v = GOMP_sections_next ();
4650 goto L0;
4651 L2:
4652 reduction;
4653
4654 If this is a combined parallel sections, replace the call to
4655    GOMP_sections_start with a call to GOMP_sections_next.  */
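/* Illustrative source for the expansion above (not from a testcase):

       #pragma omp sections
       {
         #pragma omp section
           foo ();
         #pragma omp section
           bar ();
       }

   becomes v = GOMP_sections_start (2) followed by a switch on v whose
   case 1 calls foo, case 2 calls bar and case 0 jumps past the
   construct, each arm looping back through GOMP_sections_next () until
   the runtime hands back 0.  */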
4656
4657 static void
4658 expand_omp_sections (struct omp_region *region)
4659 {
4660 tree t, u, vin = NULL, vmain, vnext, l2;
4661 VEC (tree,heap) *label_vec;
4662 unsigned len;
4663 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4664 gimple_stmt_iterator si, switch_si;
4665 gimple sections_stmt, stmt, cont;
4666 edge_iterator ei;
4667 edge e;
4668 struct omp_region *inner;
4669 unsigned i, casei;
4670 bool exit_reachable = region->cont != NULL;
4671
4672 gcc_assert (exit_reachable == (region->exit != NULL));
4673 entry_bb = region->entry;
4674 l0_bb = single_succ (entry_bb);
4675 l1_bb = region->cont;
4676 l2_bb = region->exit;
4677 if (exit_reachable)
4678 {
4679 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4680 l2 = gimple_block_label (l2_bb);
4681 else
4682 {
4683 /* This can happen if there are reductions. */
4684 len = EDGE_COUNT (l0_bb->succs);
4685 gcc_assert (len > 0);
4686 e = EDGE_SUCC (l0_bb, len - 1);
4687 si = gsi_last_bb (e->dest);
4688 l2 = NULL_TREE;
4689 if (gsi_end_p (si)
4690 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4691 l2 = gimple_block_label (e->dest);
4692 else
4693 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4694 {
4695 si = gsi_last_bb (e->dest);
4696 if (gsi_end_p (si)
4697 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4698 {
4699 l2 = gimple_block_label (e->dest);
4700 break;
4701 }
4702 }
4703 }
4704 default_bb = create_empty_bb (l1_bb->prev_bb);
4705 }
4706 else
4707 {
4708 default_bb = create_empty_bb (l0_bb);
4709 l2 = gimple_block_label (default_bb);
4710 }
4711
4712 /* We will build a switch() with enough cases for all the
4713      GIMPLE_OMP_SECTION regions, a '0' case to signal that there is no
4714      more work, and a default case to abort if something goes wrong.  */
4715 len = EDGE_COUNT (l0_bb->succs);
4716
4717 /* Use VEC_quick_push on label_vec throughout, since we know the size
4718 in advance. */
4719 label_vec = VEC_alloc (tree, heap, len);
4720
4721 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4722 GIMPLE_OMP_SECTIONS statement. */
4723 si = gsi_last_bb (entry_bb);
4724 sections_stmt = gsi_stmt (si);
4725 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4726 vin = gimple_omp_sections_control (sections_stmt);
4727 if (!is_combined_parallel (region))
4728 {
4729 /* If we are not inside a combined parallel+sections region,
4730 call GOMP_sections_start. */
4731 t = build_int_cst (unsigned_type_node,
4732 exit_reachable ? len - 1 : len);
4733 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4734 stmt = gimple_build_call (u, 1, t);
4735 }
4736 else
4737 {
4738 /* Otherwise, call GOMP_sections_next. */
4739 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4740 stmt = gimple_build_call (u, 0);
4741 }
4742 gimple_call_set_lhs (stmt, vin);
4743 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4744 gsi_remove (&si, true);
4745
4746 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4747 L0_BB. */
4748 switch_si = gsi_last_bb (l0_bb);
4749 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4750 if (exit_reachable)
4751 {
4752 cont = last_stmt (l1_bb);
4753 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4754 vmain = gimple_omp_continue_control_use (cont);
4755 vnext = gimple_omp_continue_control_def (cont);
4756 }
4757 else
4758 {
4759 vmain = vin;
4760 vnext = NULL_TREE;
4761 }
4762
4763 i = 0;
4764 if (exit_reachable)
4765 {
4766 t = build3 (CASE_LABEL_EXPR, void_type_node,
4767 build_int_cst (unsigned_type_node, 0), NULL, l2);
4768 VEC_quick_push (tree, label_vec, t);
4769 i++;
4770 }
4771
4772 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4773 for (inner = region->inner, casei = 1;
4774 inner;
4775 inner = inner->next, i++, casei++)
4776 {
4777 basic_block s_entry_bb, s_exit_bb;
4778
4779 /* Skip optional reduction region. */
4780 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4781 {
4782 --i;
4783 --casei;
4784 continue;
4785 }
4786
4787 s_entry_bb = inner->entry;
4788 s_exit_bb = inner->exit;
4789
4790 t = gimple_block_label (s_entry_bb);
4791 u = build_int_cst (unsigned_type_node, casei);
4792 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4793 VEC_quick_push (tree, label_vec, u);
4794
4795 si = gsi_last_bb (s_entry_bb);
4796 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4797 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4798 gsi_remove (&si, true);
4799 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4800
4801 if (s_exit_bb == NULL)
4802 continue;
4803
4804 si = gsi_last_bb (s_exit_bb);
4805 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4806 gsi_remove (&si, true);
4807
4808 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4809 }
4810
4811 /* Error handling code goes in DEFAULT_BB. */
4812 t = gimple_block_label (default_bb);
4813 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4814 make_edge (l0_bb, default_bb, 0);
4815
4816 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4817 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4818 gsi_remove (&switch_si, true);
4819 VEC_free (tree, heap, label_vec);
4820
4821 si = gsi_start_bb (default_bb);
4822 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4823 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4824
4825 if (exit_reachable)
4826 {
4827 /* Code to get the next section goes in L1_BB. */
4828 si = gsi_last_bb (l1_bb);
4829 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4830
4831 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4832 gimple_call_set_lhs (stmt, vnext);
4833 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4834 gsi_remove (&si, true);
4835
4836 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4837
4838 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4839 si = gsi_last_bb (l2_bb);
4840 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4841 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4842 else
4843 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4844 stmt = gimple_build_call (t, 0);
4845 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4846 gsi_remove (&si, true);
4847 }
4848
4849 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4850 }
4851
4852
4853 /* Expand code for an OpenMP single directive. We've already expanded
4854    much of the code; here we simply place the GOMP_barrier call.  */
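/* For instance, an illustrative

       #pragma omp single copyprivate (x)
         x = init ();

   has already been lowered (by lower_omp_single) to the
   GOMP_single_copy_start / GOMP_single_copy_end protocol; the only
   decision left here is whether to keep the trailing barrier, which
   for copyprivate must stay so the other threads have copied X before
   the executing thread leaves the region.  */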
4855
4856 static void
4857 expand_omp_single (struct omp_region *region)
4858 {
4859 basic_block entry_bb, exit_bb;
4860 gimple_stmt_iterator si;
4861 bool need_barrier = false;
4862
4863 entry_bb = region->entry;
4864 exit_bb = region->exit;
4865
4866 si = gsi_last_bb (entry_bb);
4867 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4868 be removed. We need to ensure that the thread that entered the single
4869 does not exit before the data is copied out by the other threads. */
4870 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4871 OMP_CLAUSE_COPYPRIVATE))
4872 need_barrier = true;
4873 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4874 gsi_remove (&si, true);
4875 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4876
4877 si = gsi_last_bb (exit_bb);
4878 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4879 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4880 false, GSI_SAME_STMT);
4881 gsi_remove (&si, true);
4882 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4883 }
4884
4885
4886 /* Generic expansion for OpenMP synchronization directives: master,
4887 ordered and critical. All we need to do here is remove the entry
4888 and exit markers for REGION. */
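/* For example, by this point an illustrative

       #pragma omp critical
         count++;

   has already had its body wrapped in GOMP_critical_start () and
   GOMP_critical_end () calls during lowering, and a master construct
   has become an "if (omp_get_thread_num () == 0)" test, so only the
   GIMPLE_OMP_* entry marker and the GIMPLE_OMP_RETURN exit marker are
   left to delete.  */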
4889
4890 static void
4891 expand_omp_synch (struct omp_region *region)
4892 {
4893 basic_block entry_bb, exit_bb;
4894 gimple_stmt_iterator si;
4895
4896 entry_bb = region->entry;
4897 exit_bb = region->exit;
4898
4899 si = gsi_last_bb (entry_bb);
4900 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4901 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4902 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4903 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4904 gsi_remove (&si, true);
4905 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4906
4907 if (exit_bb)
4908 {
4909 si = gsi_last_bb (exit_bb);
4910 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4911 gsi_remove (&si, true);
4912 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4913 }
4914 }
4915
4916 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4917 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4918 size of the data type, and thus usable to find the index of the builtin
4919 decl. Returns false if the expression is not of the proper form. */
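/* As an illustration (on a target where int is 4 bytes), an update
   such as

       #pragma omp atomic
       x += 1;

   reaches this point as a GIMPLE_OMP_ATOMIC_LOAD of X, an assignment
   "val = tmp + 1" and a GIMPLE_OMP_ATOMIC_STORE; with INDEX == 2 it is
   collapsed into a single __sync_fetch_and_add_4 (&x, 1) call,
   provided the target implements the corresponding sync optab.  */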
4920
4921 static bool
4922 expand_omp_atomic_fetch_op (basic_block load_bb,
4923 tree addr, tree loaded_val,
4924 tree stored_val, int index)
4925 {
4926 enum built_in_function base;
4927 tree decl, itype, call;
4928 direct_optab optab;
4929 tree rhs;
4930 basic_block store_bb = single_succ (load_bb);
4931 gimple_stmt_iterator gsi;
4932 gimple stmt;
4933 location_t loc;
4934
4935 /* We expect to find the following sequences:
4936
4937 load_bb:
4938 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4939
4940 store_bb:
4941 val = tmp OP something; (or: something OP tmp)
4942 GIMPLE_OMP_STORE (val)
4943
4944 ???FIXME: Allow a more flexible sequence.
4945 Perhaps use data flow to pick the statements.
4946
4947 */
4948
4949 gsi = gsi_after_labels (store_bb);
4950 stmt = gsi_stmt (gsi);
4951 loc = gimple_location (stmt);
4952 if (!is_gimple_assign (stmt))
4953 return false;
4954 gsi_next (&gsi);
4955 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
4956 return false;
4957
4958 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
4959 return false;
4960
4961 /* Check for one of the supported fetch-op operations. */
4962 switch (gimple_assign_rhs_code (stmt))
4963 {
4964 case PLUS_EXPR:
4965 case POINTER_PLUS_EXPR:
4966 base = BUILT_IN_FETCH_AND_ADD_N;
4967 optab = sync_add_optab;
4968 break;
4969 case MINUS_EXPR:
4970 base = BUILT_IN_FETCH_AND_SUB_N;
4971 optab = sync_add_optab;
4972 break;
4973 case BIT_AND_EXPR:
4974 base = BUILT_IN_FETCH_AND_AND_N;
4975 optab = sync_and_optab;
4976 break;
4977 case BIT_IOR_EXPR:
4978 base = BUILT_IN_FETCH_AND_OR_N;
4979 optab = sync_ior_optab;
4980 break;
4981 case BIT_XOR_EXPR:
4982 base = BUILT_IN_FETCH_AND_XOR_N;
4983 optab = sync_xor_optab;
4984 break;
4985 default:
4986 return false;
4987 }
4988 /* Make sure the expression is of the proper form. */
4989 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4990 rhs = gimple_assign_rhs2 (stmt);
4991 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4992 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
4993 rhs = gimple_assign_rhs1 (stmt);
4994 else
4995 return false;
4996
4997 decl = built_in_decls[base + index + 1];
4998 itype = TREE_TYPE (TREE_TYPE (decl));
4999
5000 if (direct_optab_handler (optab, TYPE_MODE (itype)) == CODE_FOR_nothing)
5001 return false;
5002
5003 gsi = gsi_last_bb (load_bb);
5004 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5005 call = build_call_expr_loc (loc,
5006 decl, 2, addr,
5007 fold_convert_loc (loc, itype, rhs));
5008 call = fold_convert_loc (loc, void_type_node, call);
5009 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5010 gsi_remove (&gsi, true);
5011
5012 gsi = gsi_last_bb (store_bb);
5013 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5014 gsi_remove (&gsi, true);
5015 gsi = gsi_last_bb (store_bb);
5016 gsi_remove (&gsi, true);
5017
5018 if (gimple_in_ssa_p (cfun))
5019 update_ssa (TODO_update_ssa_no_phi);
5020
5021 return true;
5022 }
5023
5024 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5025
5026 oldval = *addr;
5027 repeat:
5028 newval = rhs; // with oldval replacing *addr in rhs
5029 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5030 if (oldval != newval)
5031 goto repeat;
5032
5033 INDEX is log2 of the size of the data type, and thus usable to find the
5034 index of the builtin decl. */
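/* For illustration, a floating-point update that has no fetch-and-op
   builtin, such as

       double d;
       ...
       #pragma omp atomic
       d += 1.0;

   is handled by view-converting the 8-byte value to an integer type
   and looping:  load *addr, compute the new value, call
   __sync_val_compare_and_swap_8, and retry while the value returned by
   the compare-and-swap differs from the value that was loaded.  */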
5035
5036 static bool
5037 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5038 tree addr, tree loaded_val, tree stored_val,
5039 int index)
5040 {
5041 tree loadedi, storedi, initial, new_storedi, old_vali;
5042 tree type, itype, cmpxchg, iaddr;
5043 gimple_stmt_iterator si;
5044 basic_block loop_header = single_succ (load_bb);
5045 gimple phi, stmt;
5046 edge e;
5047
5048 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
5049 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5050 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5051
5052 if (direct_optab_handler (sync_compare_and_swap_optab, TYPE_MODE (itype))
5053 == CODE_FOR_nothing)
5054 return false;
5055
5056 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5057 si = gsi_last_bb (load_bb);
5058 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5059
5060 /* For floating-point values, we'll need to view-convert them to integers
5061 so that we can perform the atomic compare and swap. Simplify the
5062 following code by always setting up the "i"ntegral variables. */
5063 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5064 {
5065 tree iaddr_val;
5066
5067 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5068 true), NULL);
5069 iaddr_val
5070 = force_gimple_operand_gsi (&si,
5071 fold_convert (TREE_TYPE (iaddr), addr),
5072 false, NULL_TREE, true, GSI_SAME_STMT);
5073 stmt = gimple_build_assign (iaddr, iaddr_val);
5074 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5075 loadedi = create_tmp_var (itype, NULL);
5076 if (gimple_in_ssa_p (cfun))
5077 {
5078 add_referenced_var (iaddr);
5079 add_referenced_var (loadedi);
5080 loadedi = make_ssa_name (loadedi, NULL);
5081 }
5082 }
5083 else
5084 {
5085 iaddr = addr;
5086 loadedi = loaded_val;
5087 }
5088
5089 initial
5090 = force_gimple_operand_gsi (&si,
5091 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5092 iaddr,
5093 build_int_cst (TREE_TYPE (iaddr), 0)),
5094 true, NULL_TREE, true, GSI_SAME_STMT);
5095
5096 /* Move the value to the LOADEDI temporary. */
5097 if (gimple_in_ssa_p (cfun))
5098 {
5099 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5100 phi = create_phi_node (loadedi, loop_header);
5101 SSA_NAME_DEF_STMT (loadedi) = phi;
5102 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5103 initial);
5104 }
5105 else
5106 gsi_insert_before (&si,
5107 gimple_build_assign (loadedi, initial),
5108 GSI_SAME_STMT);
5109 if (loadedi != loaded_val)
5110 {
5111 gimple_stmt_iterator gsi2;
5112 tree x;
5113
5114 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5115 gsi2 = gsi_start_bb (loop_header);
5116 if (gimple_in_ssa_p (cfun))
5117 {
5118 gimple stmt;
5119 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5120 true, GSI_SAME_STMT);
5121 stmt = gimple_build_assign (loaded_val, x);
5122 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5123 }
5124 else
5125 {
5126 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5127 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5128 true, GSI_SAME_STMT);
5129 }
5130 }
5131 gsi_remove (&si, true);
5132
5133 si = gsi_last_bb (store_bb);
5134 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5135
5136 if (iaddr == addr)
5137 storedi = stored_val;
5138 else
5139 storedi =
5140 force_gimple_operand_gsi (&si,
5141 build1 (VIEW_CONVERT_EXPR, itype,
5142 stored_val), true, NULL_TREE, true,
5143 GSI_SAME_STMT);
5144
5145 /* Build the compare&swap statement. */
5146 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5147 new_storedi = force_gimple_operand_gsi (&si,
5148 fold_convert (TREE_TYPE (loadedi),
5149 new_storedi),
5150 true, NULL_TREE,
5151 true, GSI_SAME_STMT);
5152
5153 if (gimple_in_ssa_p (cfun))
5154 old_vali = loadedi;
5155 else
5156 {
5157 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5158 if (gimple_in_ssa_p (cfun))
5159 add_referenced_var (old_vali);
5160 stmt = gimple_build_assign (old_vali, loadedi);
5161 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5162
5163 stmt = gimple_build_assign (loadedi, new_storedi);
5164 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5165 }
5166
5167 /* Note that we always perform the comparison as an integer, even for
5168 floating point. This allows the atomic operation to properly
5169 succeed even with NaNs and -0.0. */
5170 stmt = gimple_build_cond_empty
5171 (build2 (NE_EXPR, boolean_type_node,
5172 new_storedi, old_vali));
5173 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5174
5175 /* Update cfg. */
5176 e = single_succ_edge (store_bb);
5177 e->flags &= ~EDGE_FALLTHRU;
5178 e->flags |= EDGE_FALSE_VALUE;
5179
5180 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5181
5182 /* Copy the new value to loadedi (we already did that before the condition
5183 if we are not in SSA). */
5184 if (gimple_in_ssa_p (cfun))
5185 {
5186 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5187 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5188 }
5189
5190 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5191 gsi_remove (&si, true);
5192
5193 if (gimple_in_ssa_p (cfun))
5194 update_ssa (TODO_update_ssa_no_phi);
5195
5196 return true;
5197 }
5198
5199 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5200
5201 GOMP_atomic_start ();
5202 *addr = rhs;
5203 GOMP_atomic_end ();
5204
5205 The result is not globally atomic, but works so long as all parallel
5206 references are within #pragma omp atomic directives. According to
5207    responses received from omp@openmp.org, this appears to be within
5208    spec, which makes sense given that several other compilers handle
5209    the situation the same way.
5210 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5211 expanding. STORED_VAL is the operand of the matching
5212 GIMPLE_OMP_ATOMIC_STORE.
5213
5214 We replace
5215 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5216 loaded_val = *addr;
5217
5218 and replace
5219    GIMPLE_OMP_ATOMIC_STORE (stored_val)  with
5220 *addr = stored_val;
5221 */
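/* Illustrative result of this fallback, used when neither the
   fetch-and-op nor the compare-and-swap expansion applies:

       GOMP_atomic_start ();
       loaded_val = *addr;
       ... original computation of stored_val ...
       *addr = stored_val;
       GOMP_atomic_end ();

   GOMP_atomic_start and GOMP_atomic_end serialize on a single lock
   inside libgomp, hence the caveat above that every contending
   reference must itself go through #pragma omp atomic.  */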
5222
5223 static bool
5224 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5225 tree addr, tree loaded_val, tree stored_val)
5226 {
5227 gimple_stmt_iterator si;
5228 gimple stmt;
5229 tree t;
5230
5231 si = gsi_last_bb (load_bb);
5232 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5233
5234 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5235 t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
5236 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5237
5238 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5239 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5240 gsi_remove (&si, true);
5241
5242 si = gsi_last_bb (store_bb);
5243 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5244
5245 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5246 stored_val);
5247 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5248
5249 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5250 t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
5251 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5252 gsi_remove (&si, true);
5253
5254 if (gimple_in_ssa_p (cfun))
5255 update_ssa (TODO_update_ssa_no_phi);
5256 return true;
5257 }
5258
5259 /* Expand a GIMPLE_OMP_ATOMIC statement.  We first try to expand it
5260    using expand_omp_atomic_fetch_op.  If that fails, we try to
5261    call expand_omp_atomic_pipeline, and if that fails too, the
5262    ultimate fallback is wrapping the operation in a mutex
5263 (expand_omp_atomic_mutex). REGION is the atomic region built
5264 by build_omp_regions_1(). */
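/* For instance, a naturally aligned 4-byte int yields index == 2 with
   TYPE_ALIGN_UNIT == 4, so exact_log2 (4) >= 2 holds and the __sync
   based expansions above are tried first.  An 8-byte double that the
   ABI only aligns to 4 bytes (as on 32-bit x86) fails the alignment
   test, and a type whose size is not a power of two between 1 and 16
   bytes fails the index test; both cases go straight to the mutex
   fallback.  */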
5265
5266 static void
5267 expand_omp_atomic (struct omp_region *region)
5268 {
5269 basic_block load_bb = region->entry, store_bb = region->exit;
5270 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5271 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5272 tree addr = gimple_omp_atomic_load_rhs (load);
5273 tree stored_val = gimple_omp_atomic_store_val (store);
5274 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5275 HOST_WIDE_INT index;
5276
5277 /* Make sure the type is one of the supported sizes. */
5278 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5279 index = exact_log2 (index);
5280 if (index >= 0 && index <= 4)
5281 {
5282 unsigned int align = TYPE_ALIGN_UNIT (type);
5283
5284 /* __sync builtins require strict data alignment. */
5285 if (exact_log2 (align) >= index)
5286 {
5287 /* When possible, use specialized atomic update functions. */
5288 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5289 && store_bb == single_succ (load_bb))
5290 {
5291 if (expand_omp_atomic_fetch_op (load_bb, addr,
5292 loaded_val, stored_val, index))
5293 return;
5294 }
5295
5296 /* If we don't have specialized __sync builtins, try and implement
5297 as a compare and swap loop. */
5298 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5299 loaded_val, stored_val, index))
5300 return;
5301 }
5302 }
5303
5304 /* The ultimate fallback is wrapping the operation in a mutex. */
5305 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5306 }
5307
5308
5309 /* Expand the parallel region tree rooted at REGION. Expansion
5310 proceeds in depth-first order. Innermost regions are expanded
5311 first. This way, parallel regions that require a new function to
5312 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5313 internal dependencies in their body. */
5314
5315 static void
5316 expand_omp (struct omp_region *region)
5317 {
5318 while (region)
5319 {
5320 location_t saved_location;
5321
5322 /* First, determine whether this is a combined parallel+workshare
5323 region. */
5324 if (region->type == GIMPLE_OMP_PARALLEL)
5325 determine_parallel_type (region);
5326
5327 if (region->inner)
5328 expand_omp (region->inner);
5329
5330 saved_location = input_location;
5331 if (gimple_has_location (last_stmt (region->entry)))
5332 input_location = gimple_location (last_stmt (region->entry));
5333
5334 switch (region->type)
5335 {
5336 case GIMPLE_OMP_PARALLEL:
5337 case GIMPLE_OMP_TASK:
5338 expand_omp_taskreg (region);
5339 break;
5340
5341 case GIMPLE_OMP_FOR:
5342 expand_omp_for (region);
5343 break;
5344
5345 case GIMPLE_OMP_SECTIONS:
5346 expand_omp_sections (region);
5347 break;
5348
5349 case GIMPLE_OMP_SECTION:
5350 /* Individual omp sections are handled together with their
5351 parent GIMPLE_OMP_SECTIONS region. */
5352 break;
5353
5354 case GIMPLE_OMP_SINGLE:
5355 expand_omp_single (region);
5356 break;
5357
5358 case GIMPLE_OMP_MASTER:
5359 case GIMPLE_OMP_ORDERED:
5360 case GIMPLE_OMP_CRITICAL:
5361 expand_omp_synch (region);
5362 break;
5363
5364 case GIMPLE_OMP_ATOMIC_LOAD:
5365 expand_omp_atomic (region);
5366 break;
5367
5368 default:
5369 gcc_unreachable ();
5370 }
5371
5372 input_location = saved_location;
5373 region = region->next;
5374 }
5375 }
5376
5377
5378 /* Helper for build_omp_regions. Scan the dominator tree starting at
5379 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5380    true, the function ends once a single tree is built (otherwise, the
5381    whole forest of OMP constructs may be built).  */
5382
5383 static void
5384 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5385 bool single_tree)
5386 {
5387 gimple_stmt_iterator gsi;
5388 gimple stmt;
5389 basic_block son;
5390
5391 gsi = gsi_last_bb (bb);
5392 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5393 {
5394 struct omp_region *region;
5395 enum gimple_code code;
5396
5397 stmt = gsi_stmt (gsi);
5398 code = gimple_code (stmt);
5399 if (code == GIMPLE_OMP_RETURN)
5400 {
5401 /* STMT is the return point out of region PARENT. Mark it
5402 as the exit point and make PARENT the immediately
5403 enclosing region. */
5404 gcc_assert (parent);
5405 region = parent;
5406 region->exit = bb;
5407 parent = parent->outer;
5408 }
5409 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5410 {
5411 	  /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5412 GIMPLE_OMP_RETURN, but matches with
5413 GIMPLE_OMP_ATOMIC_LOAD. */
5414 gcc_assert (parent);
5415 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5416 region = parent;
5417 region->exit = bb;
5418 parent = parent->outer;
5419 }
5420
5421 else if (code == GIMPLE_OMP_CONTINUE)
5422 {
5423 gcc_assert (parent);
5424 parent->cont = bb;
5425 }
5426 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5427 {
5428 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5429 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5430 ;
5431 }
5432 else
5433 {
5434 /* Otherwise, this directive becomes the parent for a new
5435 region. */
5436 region = new_omp_region (bb, code, parent);
5437 parent = region;
5438 }
5439 }
5440
5441 if (single_tree && !parent)
5442 return;
5443
5444 for (son = first_dom_son (CDI_DOMINATORS, bb);
5445 son;
5446 son = next_dom_son (CDI_DOMINATORS, son))
5447 build_omp_regions_1 (son, parent, single_tree);
5448 }
5449
5450 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5451 root_omp_region. */
5452
5453 static void
5454 build_omp_regions_root (basic_block root)
5455 {
5456 gcc_assert (root_omp_region == NULL);
5457 build_omp_regions_1 (root, NULL, true);
5458 gcc_assert (root_omp_region != NULL);
5459 }
5460
5461 /* Expands the OMP construct (and its subconstructs) starting at HEAD. */
5462
5463 void
5464 omp_expand_local (basic_block head)
5465 {
5466 build_omp_regions_root (head);
5467 if (dump_file && (dump_flags & TDF_DETAILS))
5468 {
5469 fprintf (dump_file, "\nOMP region tree\n\n");
5470 dump_omp_region (dump_file, root_omp_region, 0);
5471 fprintf (dump_file, "\n");
5472 }
5473
5474 remove_exit_barriers (root_omp_region);
5475 expand_omp (root_omp_region);
5476
5477 free_omp_regions ();
5478 }
5479
5480 /* Scan the CFG and build a tree of OMP regions. The root of the
5481 OMP region tree is stored in root_omp_region. */
5482
5483 static void
5484 build_omp_regions (void)
5485 {
5486 gcc_assert (root_omp_region == NULL);
5487 calculate_dominance_info (CDI_DOMINATORS);
5488 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5489 }
5490
5491 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5492
5493 static unsigned int
5494 execute_expand_omp (void)
5495 {
5496 build_omp_regions ();
5497
5498 if (!root_omp_region)
5499 return 0;
5500
5501 if (dump_file)
5502 {
5503 fprintf (dump_file, "\nOMP region tree\n\n");
5504 dump_omp_region (dump_file, root_omp_region, 0);
5505 fprintf (dump_file, "\n");
5506 }
5507
5508 remove_exit_barriers (root_omp_region);
5509
5510 expand_omp (root_omp_region);
5511
5512 cleanup_tree_cfg ();
5513
5514 free_omp_regions ();
5515
5516 return 0;
5517 }
5518
5519 /* OMP expansion -- the default pass, run before creation of SSA form. */
5520
5521 static bool
5522 gate_expand_omp (void)
5523 {
5524 return (flag_openmp != 0 && !seen_error ());
5525 }
5526
5527 struct gimple_opt_pass pass_expand_omp =
5528 {
5529 {
5530 GIMPLE_PASS,
5531 "ompexp", /* name */
5532 gate_expand_omp, /* gate */
5533 execute_expand_omp, /* execute */
5534 NULL, /* sub */
5535 NULL, /* next */
5536 0, /* static_pass_number */
5537 TV_NONE, /* tv_id */
5538 PROP_gimple_any, /* properties_required */
5539 0, /* properties_provided */
5540 0, /* properties_destroyed */
5541 0, /* todo_flags_start */
5542 TODO_dump_func /* todo_flags_finish */
5543 }
5544 };
5545 \f
5546 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5547
5548 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5549 CTX is the enclosing OMP context for the current statement. */
5550
5551 static void
5552 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5553 {
5554 tree block, control;
5555 gimple_stmt_iterator tgsi;
5556 unsigned i, len;
5557 gimple stmt, new_stmt, bind, t;
5558 gimple_seq ilist, dlist, olist, new_body, body;
5559 struct gimplify_ctx gctx;
5560
5561 stmt = gsi_stmt (*gsi_p);
5562
5563 push_gimplify_context (&gctx);
5564
5565 dlist = NULL;
5566 ilist = NULL;
5567 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5568 &ilist, &dlist, ctx);
5569
5570 tgsi = gsi_start (gimple_omp_body (stmt));
5571 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5572 continue;
5573
5574 tgsi = gsi_start (gimple_omp_body (stmt));
5575 body = NULL;
5576 for (i = 0; i < len; i++, gsi_next (&tgsi))
5577 {
5578 omp_context *sctx;
5579 gimple sec_start;
5580
5581 sec_start = gsi_stmt (tgsi);
5582 sctx = maybe_lookup_ctx (sec_start);
5583 gcc_assert (sctx);
5584
5585 gimple_seq_add_stmt (&body, sec_start);
5586
5587 lower_omp (gimple_omp_body (sec_start), sctx);
5588 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5589 gimple_omp_set_body (sec_start, NULL);
5590
5591 if (i == len - 1)
5592 {
5593 gimple_seq l = NULL;
5594 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5595 &l, ctx);
5596 gimple_seq_add_seq (&body, l);
5597 gimple_omp_section_set_last (sec_start);
5598 }
5599
5600 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5601 }
5602
5603 block = make_node (BLOCK);
5604 bind = gimple_build_bind (NULL, body, block);
5605
5606 olist = NULL;
5607 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5608
5609 block = make_node (BLOCK);
5610 new_stmt = gimple_build_bind (NULL, NULL, block);
5611
5612 pop_gimplify_context (new_stmt);
5613 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5614 BLOCK_VARS (block) = gimple_bind_vars (bind);
5615 if (BLOCK_VARS (block))
5616 TREE_USED (block) = 1;
5617
5618 new_body = NULL;
5619 gimple_seq_add_seq (&new_body, ilist);
5620 gimple_seq_add_stmt (&new_body, stmt);
5621 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5622 gimple_seq_add_stmt (&new_body, bind);
5623
5624 control = create_tmp_var (unsigned_type_node, ".section");
5625 t = gimple_build_omp_continue (control, control);
5626 gimple_omp_sections_set_control (stmt, control);
5627 gimple_seq_add_stmt (&new_body, t);
5628
5629 gimple_seq_add_seq (&new_body, olist);
5630 gimple_seq_add_seq (&new_body, dlist);
5631
5632 new_body = maybe_catch_exception (new_body);
5633
5634 t = gimple_build_omp_return
5635 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5636 OMP_CLAUSE_NOWAIT));
5637 gimple_seq_add_stmt (&new_body, t);
5638
5639 gimple_bind_set_body (new_stmt, new_body);
5640 gimple_omp_set_body (stmt, NULL);
5641
5642 gsi_replace (gsi_p, new_stmt, true);
5643 }
5644
5645
5646 /* A subroutine of lower_omp_single. Expand the simple form of
5647 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5648
5649 if (GOMP_single_start ())
5650 BODY;
5651 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5652
5653 FIXME. It may be better to delay expanding the logic of this until
5654 pass_expand_omp. The expanded logic may make the job more difficult
5655 for a synchronization analysis pass. */
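/* For illustration only (a sketch, not the literal output of this
   function), a user-level construct such as

        #pragma omp single
          x = f ();

   is conceptually turned into

        if (GOMP_single_start ())
          x = f ();
        GOMP_barrier ();

   with the trailing barrier omitted when the 'nowait' clause is present.
   Here 'x' and 'f' are hypothetical user symbols; the barrier itself is
   represented by the GIMPLE_OMP_RETURN emitted by the caller.  */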
5656
5657 static void
5658 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5659 {
5660 location_t loc = gimple_location (single_stmt);
5661 tree tlabel = create_artificial_label (loc);
5662 tree flabel = create_artificial_label (loc);
5663 gimple call, cond;
5664 tree lhs, decl;
5665
5666 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5667 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5668 call = gimple_build_call (decl, 0);
5669 gimple_call_set_lhs (call, lhs);
5670 gimple_seq_add_stmt (pre_p, call);
5671
5672 cond = gimple_build_cond (EQ_EXPR, lhs,
5673 fold_convert_loc (loc, TREE_TYPE (lhs),
5674 boolean_true_node),
5675 tlabel, flabel);
5676 gimple_seq_add_stmt (pre_p, cond);
5677 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5678 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5679 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5680 }
5681
5682
5683 /* A subroutine of lower_omp_single. Expand the form of a
5684 GIMPLE_OMP_SINGLE that has a copyprivate clause:
5685
5686 #pragma omp single copyprivate (a, b, c)
5687
5688 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5689
5690 {
5691 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5692 {
5693 BODY;
5694 copyout.a = a;
5695 copyout.b = b;
5696 copyout.c = c;
5697 GOMP_single_copy_end (&copyout);
5698 }
5699 else
5700 {
5701 a = copyout_p->a;
5702 b = copyout_p->b;
5703 c = copyout_p->c;
5704 }
5705 GOMP_barrier ();
5706 }
5707
5708 FIXME. It may be better to delay expanding the logic of this until
5709 pass_expand_omp. The expanded logic may make the job more difficult
5710 for a synchronization analysis pass. */
5711
5712 static void
5713 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5714 {
5715 tree ptr_type, t, l0, l1, l2;
5716 gimple_seq copyin_seq;
5717 location_t loc = gimple_location (single_stmt);
5718
5719 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5720
5721 ptr_type = build_pointer_type (ctx->record_type);
5722 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5723
5724 l0 = create_artificial_label (loc);
5725 l1 = create_artificial_label (loc);
5726 l2 = create_artificial_label (loc);
5727
5728 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5729 t = fold_convert_loc (loc, ptr_type, t);
5730 gimplify_assign (ctx->receiver_decl, t, pre_p);
5731
5732 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5733 build_int_cst (ptr_type, 0));
5734 t = build3 (COND_EXPR, void_type_node, t,
5735 build_and_jump (&l0), build_and_jump (&l1));
5736 gimplify_and_add (t, pre_p);
5737
5738 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5739
5740 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5741
5742 copyin_seq = NULL;
5743 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5744 &copyin_seq, ctx);
5745
5746 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5747 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
5748 1, t);
5749 gimplify_and_add (t, pre_p);
5750
5751 t = build_and_jump (&l2);
5752 gimplify_and_add (t, pre_p);
5753
5754 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5755
5756 gimple_seq_add_seq (pre_p, copyin_seq);
5757
5758 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5759 }
5760
5761
5762 /* Lower code for an OpenMP single directive. */
5763
5764 static void
5765 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5766 {
5767 tree block;
5768 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5769 gimple_seq bind_body, dlist;
5770 struct gimplify_ctx gctx;
5771
5772 push_gimplify_context (&gctx);
5773
5774 bind_body = NULL;
5775 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5776 &bind_body, &dlist, ctx);
5777 lower_omp (gimple_omp_body (single_stmt), ctx);
5778
5779 gimple_seq_add_stmt (&bind_body, single_stmt);
5780
5781 if (ctx->record_type)
5782 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5783 else
5784 lower_omp_single_simple (single_stmt, &bind_body);
5785
5786 gimple_omp_set_body (single_stmt, NULL);
5787
5788 gimple_seq_add_seq (&bind_body, dlist);
5789
5790 bind_body = maybe_catch_exception (bind_body);
5791
5792 t = gimple_build_omp_return
5793 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5794 OMP_CLAUSE_NOWAIT));
5795 gimple_seq_add_stmt (&bind_body, t);
5796
5797 block = make_node (BLOCK);
5798 bind = gimple_build_bind (NULL, bind_body, block);
5799
5800 pop_gimplify_context (bind);
5801
5802 gimple_bind_append_vars (bind, ctx->block_vars);
5803 BLOCK_VARS (block) = ctx->block_vars;
5804 gsi_replace (gsi_p, bind, true);
5805 if (BLOCK_VARS (block))
5806 TREE_USED (block) = 1;
5807 }
5808
5809
5810 /* Lower code for an OpenMP master directive. */
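/* A sketch of the lowering performed here (illustrative only): the body of

        #pragma omp master
          BODY;

   is guarded by a thread-number test rather than by a library call, roughly

        if (omp_get_thread_num () == 0)
          BODY;

   followed by a GIMPLE_OMP_RETURN marking the end of the region.  */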
5811
5812 static void
5813 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5814 {
5815 tree block, lab = NULL, x;
5816 gimple stmt = gsi_stmt (*gsi_p), bind;
5817 location_t loc = gimple_location (stmt);
5818 gimple_seq tseq;
5819 struct gimplify_ctx gctx;
5820
5821 push_gimplify_context (&gctx);
5822
5823 block = make_node (BLOCK);
5824 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5825 block);
5826
5827 x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5828 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5829 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5830 tseq = NULL;
5831 gimplify_and_add (x, &tseq);
5832 gimple_bind_add_seq (bind, tseq);
5833
5834 lower_omp (gimple_omp_body (stmt), ctx);
5835 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5836 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5837 gimple_omp_set_body (stmt, NULL);
5838
5839 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5840
5841 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5842
5843 pop_gimplify_context (bind);
5844
5845 gimple_bind_append_vars (bind, ctx->block_vars);
5846 BLOCK_VARS (block) = ctx->block_vars;
5847 gsi_replace (gsi_p, bind, true);
5848 }
5849
5850
5851 /* Lower code for an OpenMP ordered directive. */
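/* A sketch of the lowering performed here (illustrative only):

        #pragma omp ordered
          BODY;

   becomes, roughly,

        GOMP_ordered_start ();
        BODY;
        GOMP_ordered_end ();

   followed by a GIMPLE_OMP_RETURN marking the end of the region.  */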
5852
5853 static void
5854 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5855 {
5856 tree block;
5857 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5858 struct gimplify_ctx gctx;
5859
5860 push_gimplify_context (&gctx);
5861
5862 block = make_node (BLOCK);
5863 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5864 block);
5865
5866 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5867 gimple_bind_add_stmt (bind, x);
5868
5869 lower_omp (gimple_omp_body (stmt), ctx);
5870 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5871 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5872 gimple_omp_set_body (stmt, NULL);
5873
5874 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5875 gimple_bind_add_stmt (bind, x);
5876
5877 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5878
5879 pop_gimplify_context (bind);
5880
5881 gimple_bind_append_vars (bind, ctx->block_vars);
5882 BLOCK_VARS (block) = gimple_bind_vars (bind);
5883 gsi_replace (gsi_p, bind, true);
5884 }
5885
5886
5887 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
5888 substitution of a couple of function calls. But the NAMED case
5889 requires that the languages coordinate a symbol name. It is therefore
5890 best put here in common code. */
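/* For illustration only (a sketch, not literal output):

        #pragma omp critical (foo)
          BODY;

   becomes, roughly,

        GOMP_critical_name_start (&.gomp_critical_user_foo);
        BODY;
        GOMP_critical_name_end (&.gomp_critical_user_foo);

   whereas the unnamed form uses GOMP_critical_start ()/GOMP_critical_end ().
   The ".gomp_critical_user_foo" mutex is emitted as a public common symbol,
   which is why all front ends must agree on the name.  Here 'foo' is a
   hypothetical user-chosen name.  */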
5891
5892 static GTY((param1_is (tree), param2_is (tree)))
5893 splay_tree critical_name_mutexes;
5894
5895 static void
5896 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5897 {
5898 tree block;
5899 tree name, lock, unlock;
5900 gimple stmt = gsi_stmt (*gsi_p), bind;
5901 location_t loc = gimple_location (stmt);
5902 gimple_seq tbody;
5903 struct gimplify_ctx gctx;
5904
5905 name = gimple_omp_critical_name (stmt);
5906 if (name)
5907 {
5908 tree decl;
5909 splay_tree_node n;
5910
5911 if (!critical_name_mutexes)
5912 critical_name_mutexes
5913 = splay_tree_new_ggc (splay_tree_compare_pointers,
5914 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
5915 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
5916
5917 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5918 if (n == NULL)
5919 {
5920 char *new_str;
5921
5922 decl = create_tmp_var_raw (ptr_type_node, NULL);
5923
5924 new_str = ACONCAT ((".gomp_critical_user_",
5925 IDENTIFIER_POINTER (name), NULL));
5926 DECL_NAME (decl) = get_identifier (new_str);
5927 TREE_PUBLIC (decl) = 1;
5928 TREE_STATIC (decl) = 1;
5929 DECL_COMMON (decl) = 1;
5930 DECL_ARTIFICIAL (decl) = 1;
5931 DECL_IGNORED_P (decl) = 1;
5932 varpool_finalize_decl (decl);
5933
5934 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5935 (splay_tree_value) decl);
5936 }
5937 else
5938 decl = (tree) n->value;
5939
5940 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5941 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
5942
5943 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5944 unlock = build_call_expr_loc (loc, unlock, 1,
5945 build_fold_addr_expr_loc (loc, decl));
5946 }
5947 else
5948 {
5949 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5950 lock = build_call_expr_loc (loc, lock, 0);
5951
5952 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5953 unlock = build_call_expr_loc (loc, unlock, 0);
5954 }
5955
5956 push_gimplify_context (&gctx);
5957
5958 block = make_node (BLOCK);
5959 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5960
5961 tbody = gimple_bind_body (bind);
5962 gimplify_and_add (lock, &tbody);
5963 gimple_bind_set_body (bind, tbody);
5964
5965 lower_omp (gimple_omp_body (stmt), ctx);
5966 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5967 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5968 gimple_omp_set_body (stmt, NULL);
5969
5970 tbody = gimple_bind_body (bind);
5971 gimplify_and_add (unlock, &tbody);
5972 gimple_bind_set_body (bind, tbody);
5973
5974 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5975
5976 pop_gimplify_context (bind);
5977 gimple_bind_append_vars (bind, ctx->block_vars);
5978 BLOCK_VARS (block) = gimple_bind_vars (bind);
5979 gsi_replace (gsi_p, bind, true);
5980 }
5981
5982
5983 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5984 for a lastprivate clause. Given a loop control predicate of (V
5985 cond N2), we gate the clause on (!(V cond N2)). The lowered form
5986 is appended to *DLIST, iterator initialization is appended to
5987 *BODY_P. */
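/* For example (a sketch, assuming a simple canonical ascending loop), given

        #pragma omp for lastprivate (x)
        for (i = 0; i < n; i++)
          x = ...;

   the lastprivate copy-back appended to *DLIST is guarded roughly by

        if (i >= n)     (or i == n when the step is known to be +-1)
          copy the private 'x' back to the original 'x';

   and *BODY_P receives an initialization of 'i', so threads that run no
   iterations never satisfy the guard.  'x', 'i' and 'n' are hypothetical
   user variables.  */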
5988
5989 static void
5990 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5991 gimple_seq *dlist, struct omp_context *ctx)
5992 {
5993 tree clauses, cond, vinit;
5994 enum tree_code cond_code;
5995 gimple_seq stmts;
5996
5997 cond_code = fd->loop.cond_code;
5998 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5999
6000 /* When possible, use a strict equality expression. This can let
6001 VRP-style optimizations deduce the value and remove a copy. */
6002 if (host_integerp (fd->loop.step, 0))
6003 {
6004 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6005 if (step == 1 || step == -1)
6006 cond_code = EQ_EXPR;
6007 }
6008
6009 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6010
6011 clauses = gimple_omp_for_clauses (fd->for_stmt);
6012 stmts = NULL;
6013 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6014 if (!gimple_seq_empty_p (stmts))
6015 {
6016 gimple_seq_add_seq (&stmts, *dlist);
6017 *dlist = stmts;
6018
6019 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6020 vinit = fd->loop.n1;
6021 if (cond_code == EQ_EXPR
6022 && host_integerp (fd->loop.n2, 0)
6023 && ! integer_zerop (fd->loop.n2))
6024 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6025
6026 /* Initialize the iterator variable, so that threads that don't execute
6027 any iterations don't execute the lastprivate clauses by accident. */
6028 gimplify_assign (fd->loop.v, vinit, body_p);
6029 }
6030 }
6031
6032
6033 /* Lower code for an OpenMP loop directive. */
6034
6035 static void
6036 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6037 {
6038 tree *rhs_p, block;
6039 struct omp_for_data fd;
6040 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6041 gimple_seq omp_for_body, body, dlist;
6042 size_t i;
6043 struct gimplify_ctx gctx;
6044
6045 push_gimplify_context (&gctx);
6046
6047 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6048 lower_omp (gimple_omp_body (stmt), ctx);
6049
6050 block = make_node (BLOCK);
6051 new_stmt = gimple_build_bind (NULL, NULL, block);
6052
6053 /* Move the declarations of temporaries out of the loop body before
6054 we make it go away. */
6055 omp_for_body = gimple_omp_body (stmt);
6056 if (!gimple_seq_empty_p (omp_for_body)
6057 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6058 {
6059 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6060 gimple_bind_append_vars (new_stmt, vars);
6061 }
6062
6063 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6064 dlist = NULL;
6065 body = NULL;
6066 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6067 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6068
6069 /* Lower the header expressions. At this point, we can assume that
6070 the header is of the form:
6071
6072 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6073
6074 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6075 using the .omp_data_s mapping, if needed. */
6076 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6077 {
6078 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6079 if (!is_gimple_min_invariant (*rhs_p))
6080 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6081
6082 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6083 if (!is_gimple_min_invariant (*rhs_p))
6084 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6085
6086 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6087 if (!is_gimple_min_invariant (*rhs_p))
6088 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6089 }
6090
6091 /* Once lowered, extract the bounds and clauses. */
6092 extract_omp_for_data (stmt, &fd, NULL);
6093
6094 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6095
6096 gimple_seq_add_stmt (&body, stmt);
6097 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6098
6099 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6100 fd.loop.v));
6101
6102 /* After the loop, add exit clauses. */
6103 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6104 gimple_seq_add_seq (&body, dlist);
6105
6106 body = maybe_catch_exception (body);
6107
6108 /* Region exit marker goes at the end of the loop body. */
6109 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6110
6111 pop_gimplify_context (new_stmt);
6112
6113 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6114 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6115 if (BLOCK_VARS (block))
6116 TREE_USED (block) = 1;
6117
6118 gimple_bind_set_body (new_stmt, body);
6119 gimple_omp_set_body (stmt, NULL);
6120 gimple_omp_for_set_pre_body (stmt, NULL);
6121 gsi_replace (gsi_p, new_stmt, true);
6122 }
6123
6124 /* Callback for walk_stmts. Helper to detect a parallel body that contains
6125 exactly one workshare construct (GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS). */
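/* For instance (illustrative), in

        #pragma omp parallel
        #pragma omp for
        for (i = 0; i < n; i++)
          ...;

   the parallel body contains exactly one workshare statement, so the
   counter reaches 1 and the caller marks the parallel as combined; any
   other statement, or a second workshare, forces the counter to -1.  */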
6126
6127 static tree
6128 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6129 bool *handled_ops_p,
6130 struct walk_stmt_info *wi)
6131 {
6132 int *info = (int *) wi->info;
6133 gimple stmt = gsi_stmt (*gsi_p);
6134
6135 *handled_ops_p = true;
6136 switch (gimple_code (stmt))
6137 {
6138 WALK_SUBSTMTS;
6139
6140 case GIMPLE_OMP_FOR:
6141 case GIMPLE_OMP_SECTIONS:
6142 *info = *info == 0 ? 1 : -1;
6143 break;
6144 default:
6145 *info = -1;
6146 break;
6147 }
6148 return NULL;
6149 }
6150
6151 struct omp_taskcopy_context
6152 {
6153 /* This field must be at the beginning, as we do "inheritance": Some
6154 callback functions for tree-inline.c (e.g., omp_copy_decl)
6155 receive a copy_body_data pointer that is up-casted to an
6156 omp_context pointer. */
6157 copy_body_data cb;
6158 omp_context *ctx;
6159 };
6160
6161 static tree
6162 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6163 {
6164 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6165
6166 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6167 return create_tmp_var (TREE_TYPE (var), NULL);
6168
6169 return var;
6170 }
6171
6172 static tree
6173 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6174 {
6175 tree name, new_fields = NULL, type, f;
6176
6177 type = lang_hooks.types.make_type (RECORD_TYPE);
6178 name = DECL_NAME (TYPE_NAME (orig_type));
6179 name = build_decl (gimple_location (tcctx->ctx->stmt),
6180 TYPE_DECL, name, type);
6181 TYPE_NAME (type) = name;
6182
6183 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6184 {
6185 tree new_f = copy_node (f);
6186 DECL_CONTEXT (new_f) = type;
6187 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6188 TREE_CHAIN (new_f) = new_fields;
6189 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6190 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6191 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6192 &tcctx->cb, NULL);
6193 new_fields = new_f;
6194 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6195 }
6196 TYPE_FIELDS (type) = nreverse (new_fields);
6197 layout_type (type);
6198 return type;
6199 }
6200
6201 /* Create task copyfn. */
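/* Schematically (a sketch with placeholder names, not literal output),
   the generated copy function has the shape

        void copyfn (struct DATA *arg, struct SRC_DATA *sarg)
        {
          arg->shared_var_ptr = sarg->shared_var_ptr;
          arg->firstprivate_var = sarg->firstprivate_var;  (or copy ctor)
          ...
        }

   copying shared variable pointers and firstprivate values from *SARG
   into *ARG, with variable-sized firstprivates handled in a final pass
   below.  */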
6202
6203 static void
6204 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6205 {
6206 struct function *child_cfun;
6207 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6208 tree record_type, srecord_type, bind, list;
6209 bool record_needs_remap = false, srecord_needs_remap = false;
6210 splay_tree_node n;
6211 struct omp_taskcopy_context tcctx;
6212 struct gimplify_ctx gctx;
6213 location_t loc = gimple_location (task_stmt);
6214
6215 child_fn = gimple_omp_task_copy_fn (task_stmt);
6216 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6217 gcc_assert (child_cfun->cfg == NULL);
6218 child_cfun->dont_save_pending_sizes_p = 1;
6219 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6220
6221 /* Reset DECL_CONTEXT on function arguments. */
6222 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6223 DECL_CONTEXT (t) = child_fn;
6224
6225 /* Populate the function. */
6226 push_gimplify_context (&gctx);
6227 current_function_decl = child_fn;
6228
6229 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6230 TREE_SIDE_EFFECTS (bind) = 1;
6231 list = NULL;
6232 DECL_SAVED_TREE (child_fn) = bind;
6233 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6234
6235 /* Remap src and dst argument types if needed. */
6236 record_type = ctx->record_type;
6237 srecord_type = ctx->srecord_type;
6238 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6239 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6240 {
6241 record_needs_remap = true;
6242 break;
6243 }
6244 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6245 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6246 {
6247 srecord_needs_remap = true;
6248 break;
6249 }
6250
6251 if (record_needs_remap || srecord_needs_remap)
6252 {
6253 memset (&tcctx, '\0', sizeof (tcctx));
6254 tcctx.cb.src_fn = ctx->cb.src_fn;
6255 tcctx.cb.dst_fn = child_fn;
6256 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6257 tcctx.cb.dst_node = tcctx.cb.src_node;
6258 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6259 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6260 tcctx.cb.eh_lp_nr = 0;
6261 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6262 tcctx.cb.decl_map = pointer_map_create ();
6263 tcctx.ctx = ctx;
6264
6265 if (record_needs_remap)
6266 record_type = task_copyfn_remap_type (&tcctx, record_type);
6267 if (srecord_needs_remap)
6268 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6269 }
6270 else
6271 tcctx.cb.decl_map = NULL;
6272
6273 push_cfun (child_cfun);
6274
6275 arg = DECL_ARGUMENTS (child_fn);
6276 TREE_TYPE (arg) = build_pointer_type (record_type);
6277 sarg = TREE_CHAIN (arg);
6278 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6279
6280 /* First pass: initialize temporaries used in record_type and srecord_type
6281 sizes and field offsets. */
6282 if (tcctx.cb.decl_map)
6283 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6284 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6285 {
6286 tree *p;
6287
6288 decl = OMP_CLAUSE_DECL (c);
6289 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6290 if (p == NULL)
6291 continue;
6292 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6293 sf = (tree) n->value;
6294 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6295 src = build_simple_mem_ref_loc (loc, sarg);
6296 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6297 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6298 append_to_statement_list (t, &list);
6299 }
6300
6301 /* Second pass: copy shared var pointers and copy construct non-VLA
6302 firstprivate vars. */
6303 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6304 switch (OMP_CLAUSE_CODE (c))
6305 {
6306 case OMP_CLAUSE_SHARED:
6307 decl = OMP_CLAUSE_DECL (c);
6308 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6309 if (n == NULL)
6310 break;
6311 f = (tree) n->value;
6312 if (tcctx.cb.decl_map)
6313 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6314 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6315 sf = (tree) n->value;
6316 if (tcctx.cb.decl_map)
6317 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6318 src = build_simple_mem_ref_loc (loc, sarg);
6319 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6320 dst = build_simple_mem_ref_loc (loc, arg);
6321 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6322 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6323 append_to_statement_list (t, &list);
6324 break;
6325 case OMP_CLAUSE_FIRSTPRIVATE:
6326 decl = OMP_CLAUSE_DECL (c);
6327 if (is_variable_sized (decl))
6328 break;
6329 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6330 if (n == NULL)
6331 break;
6332 f = (tree) n->value;
6333 if (tcctx.cb.decl_map)
6334 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6335 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6336 if (n != NULL)
6337 {
6338 sf = (tree) n->value;
6339 if (tcctx.cb.decl_map)
6340 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6341 src = build_simple_mem_ref_loc (loc, sarg);
6342 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6343 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6344 src = build_simple_mem_ref_loc (loc, src);
6345 }
6346 else
6347 src = decl;
6348 dst = build_simple_mem_ref_loc (loc, arg);
6349 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6350 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6351 append_to_statement_list (t, &list);
6352 break;
6353 case OMP_CLAUSE_PRIVATE:
6354 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6355 break;
6356 decl = OMP_CLAUSE_DECL (c);
6357 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6358 f = (tree) n->value;
6359 if (tcctx.cb.decl_map)
6360 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6361 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6362 if (n != NULL)
6363 {
6364 sf = (tree) n->value;
6365 if (tcctx.cb.decl_map)
6366 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6367 src = build_simple_mem_ref_loc (loc, sarg);
6368 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6369 if (use_pointer_for_field (decl, NULL))
6370 src = build_simple_mem_ref_loc (loc, src);
6371 }
6372 else
6373 src = decl;
6374 dst = build_simple_mem_ref_loc (loc, arg);
6375 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6376 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6377 append_to_statement_list (t, &list);
6378 break;
6379 default:
6380 break;
6381 }
6382
6383 /* Last pass: handle VLA firstprivates. */
6384 if (tcctx.cb.decl_map)
6385 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6386 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6387 {
6388 tree ind, ptr, df;
6389
6390 decl = OMP_CLAUSE_DECL (c);
6391 if (!is_variable_sized (decl))
6392 continue;
6393 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6394 if (n == NULL)
6395 continue;
6396 f = (tree) n->value;
6397 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6398 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6399 ind = DECL_VALUE_EXPR (decl);
6400 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6401 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6402 n = splay_tree_lookup (ctx->sfield_map,
6403 (splay_tree_key) TREE_OPERAND (ind, 0));
6404 sf = (tree) n->value;
6405 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6406 src = build_simple_mem_ref_loc (loc, sarg);
6407 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6408 src = build_simple_mem_ref_loc (loc, src);
6409 dst = build_simple_mem_ref_loc (loc, arg);
6410 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6411 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6412 append_to_statement_list (t, &list);
6413 n = splay_tree_lookup (ctx->field_map,
6414 (splay_tree_key) TREE_OPERAND (ind, 0));
6415 df = (tree) n->value;
6416 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6417 ptr = build_simple_mem_ref_loc (loc, arg);
6418 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6419 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6420 build_fold_addr_expr_loc (loc, dst));
6421 append_to_statement_list (t, &list);
6422 }
6423
6424 t = build1 (RETURN_EXPR, void_type_node, NULL);
6425 append_to_statement_list (t, &list);
6426
6427 if (tcctx.cb.decl_map)
6428 pointer_map_destroy (tcctx.cb.decl_map);
6429 pop_gimplify_context (NULL);
6430 BIND_EXPR_BODY (bind) = list;
6431 pop_cfun ();
6432 current_function_decl = ctx->cb.src_fn;
6433 }
6434
6435 /* Lower the OpenMP parallel or task directive in the current statement
6436 in GSI_P. CTX holds context information for the directive. */
6437
6438 static void
6439 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6440 {
6441 tree clauses;
6442 tree child_fn, t;
6443 gimple stmt = gsi_stmt (*gsi_p);
6444 gimple par_bind, bind;
6445 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6446 struct gimplify_ctx gctx;
6447 location_t loc = gimple_location (stmt);
6448
6449 clauses = gimple_omp_taskreg_clauses (stmt);
6450 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6451 par_body = gimple_bind_body (par_bind);
6452 child_fn = ctx->cb.dst_fn;
6453 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6454 && !gimple_omp_parallel_combined_p (stmt))
6455 {
6456 struct walk_stmt_info wi;
6457 int ws_num = 0;
6458
6459 memset (&wi, 0, sizeof (wi));
6460 wi.info = &ws_num;
6461 wi.val_only = true;
6462 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6463 if (ws_num == 1)
6464 gimple_omp_parallel_set_combined_p (stmt, true);
6465 }
6466 if (ctx->srecord_type)
6467 create_task_copyfn (stmt, ctx);
6468
6469 push_gimplify_context (&gctx);
6470
6471 par_olist = NULL;
6472 par_ilist = NULL;
6473 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6474 lower_omp (par_body, ctx);
6475 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6476 lower_reduction_clauses (clauses, &par_olist, ctx);
6477
6478 /* Declare all the variables created by mapping and the variables
6479 declared in the scope of the parallel body. */
6480 record_vars_into (ctx->block_vars, child_fn);
6481 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6482
6483 if (ctx->record_type)
6484 {
6485 ctx->sender_decl
6486 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6487 : ctx->record_type, ".omp_data_o");
6488 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6489 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6490 }
6491
6492 olist = NULL;
6493 ilist = NULL;
6494 lower_send_clauses (clauses, &ilist, &olist, ctx);
6495 lower_send_shared_vars (&ilist, &olist, ctx);
6496
6497 /* Once all the expansions are done, sequence all the different
6498 fragments inside gimple_omp_body. */
6499
6500 new_body = NULL;
6501
6502 if (ctx->record_type)
6503 {
6504 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6505 /* fixup_child_record_type might have changed receiver_decl's type. */
6506 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
6507 gimple_seq_add_stmt (&new_body,
6508 gimple_build_assign (ctx->receiver_decl, t));
6509 }
6510
6511 gimple_seq_add_seq (&new_body, par_ilist);
6512 gimple_seq_add_seq (&new_body, par_body);
6513 gimple_seq_add_seq (&new_body, par_olist);
6514 new_body = maybe_catch_exception (new_body);
6515 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6516 gimple_omp_set_body (stmt, new_body);
6517
6518 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6519 gimple_bind_add_stmt (bind, stmt);
6520 if (ilist || olist)
6521 {
6522 gimple_seq_add_stmt (&ilist, bind);
6523 gimple_seq_add_seq (&ilist, olist);
6524 bind = gimple_build_bind (NULL, ilist, NULL);
6525 }
6526
6527 gsi_replace (gsi_p, bind, true);
6528
6529 pop_gimplify_context (NULL);
6530 }
6531
6532 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6533 regimplified. If DATA is non-NULL, lower_omp_1 is being run outside
6534 of an OpenMP context, but with task_shared_vars set. */
6535
6536 static tree
6537 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6538 void *data)
6539 {
6540 tree t = *tp;
6541
6542 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6543 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6544 return t;
6545
6546 if (task_shared_vars
6547 && DECL_P (t)
6548 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6549 return t;
6550
6551 /* If a global variable has been privatized, TREE_CONSTANT on
6552 ADDR_EXPR might be wrong. */
6553 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6554 recompute_tree_invariant_for_addr_expr (t);
6555
6556 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6557 return NULL_TREE;
6558 }
6559
6560 static void
6561 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6562 {
6563 gimple stmt = gsi_stmt (*gsi_p);
6564 struct walk_stmt_info wi;
6565
6566 if (gimple_has_location (stmt))
6567 input_location = gimple_location (stmt);
6568
6569 if (task_shared_vars)
6570 memset (&wi, '\0', sizeof (wi));
6571
6572 /* If we have issued syntax errors, avoid doing any heavy lifting.
6573 Just replace the OpenMP directives with a NOP to avoid
6574 confusing RTL expansion. */
6575 if (seen_error () && is_gimple_omp (stmt))
6576 {
6577 gsi_replace (gsi_p, gimple_build_nop (), true);
6578 return;
6579 }
6580
6581 switch (gimple_code (stmt))
6582 {
6583 case GIMPLE_COND:
6584 if ((ctx || task_shared_vars)
6585 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6586 ctx ? NULL : &wi, NULL)
6587 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6588 ctx ? NULL : &wi, NULL)))
6589 gimple_regimplify_operands (stmt, gsi_p);
6590 break;
6591 case GIMPLE_CATCH:
6592 lower_omp (gimple_catch_handler (stmt), ctx);
6593 break;
6594 case GIMPLE_EH_FILTER:
6595 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6596 break;
6597 case GIMPLE_TRY:
6598 lower_omp (gimple_try_eval (stmt), ctx);
6599 lower_omp (gimple_try_cleanup (stmt), ctx);
6600 break;
6601 case GIMPLE_BIND:
6602 lower_omp (gimple_bind_body (stmt), ctx);
6603 break;
6604 case GIMPLE_OMP_PARALLEL:
6605 case GIMPLE_OMP_TASK:
6606 ctx = maybe_lookup_ctx (stmt);
6607 lower_omp_taskreg (gsi_p, ctx);
6608 break;
6609 case GIMPLE_OMP_FOR:
6610 ctx = maybe_lookup_ctx (stmt);
6611 gcc_assert (ctx);
6612 lower_omp_for (gsi_p, ctx);
6613 break;
6614 case GIMPLE_OMP_SECTIONS:
6615 ctx = maybe_lookup_ctx (stmt);
6616 gcc_assert (ctx);
6617 lower_omp_sections (gsi_p, ctx);
6618 break;
6619 case GIMPLE_OMP_SINGLE:
6620 ctx = maybe_lookup_ctx (stmt);
6621 gcc_assert (ctx);
6622 lower_omp_single (gsi_p, ctx);
6623 break;
6624 case GIMPLE_OMP_MASTER:
6625 ctx = maybe_lookup_ctx (stmt);
6626 gcc_assert (ctx);
6627 lower_omp_master (gsi_p, ctx);
6628 break;
6629 case GIMPLE_OMP_ORDERED:
6630 ctx = maybe_lookup_ctx (stmt);
6631 gcc_assert (ctx);
6632 lower_omp_ordered (gsi_p, ctx);
6633 break;
6634 case GIMPLE_OMP_CRITICAL:
6635 ctx = maybe_lookup_ctx (stmt);
6636 gcc_assert (ctx);
6637 lower_omp_critical (gsi_p, ctx);
6638 break;
6639 case GIMPLE_OMP_ATOMIC_LOAD:
6640 if ((ctx || task_shared_vars)
6641 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6642 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6643 gimple_regimplify_operands (stmt, gsi_p);
6644 break;
6645 default:
6646 if ((ctx || task_shared_vars)
6647 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6648 ctx ? NULL : &wi))
6649 gimple_regimplify_operands (stmt, gsi_p);
6650 break;
6651 }
6652 }
6653
6654 static void
6655 lower_omp (gimple_seq body, omp_context *ctx)
6656 {
6657 location_t saved_location = input_location;
6658 gimple_stmt_iterator gsi = gsi_start (body);
6659 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6660 lower_omp_1 (&gsi, ctx);
6661 input_location = saved_location;
6662 }
6663 \f
6664 /* Main entry point. */
6665
6666 static unsigned int
6667 execute_lower_omp (void)
6668 {
6669 gimple_seq body;
6670
6671 /* This pass always runs, to provide PROP_gimple_lomp.
6672 But there is nothing to do unless -fopenmp is given. */
6673 if (flag_openmp == 0)
6674 return 0;
6675
6676 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6677 delete_omp_context);
6678
6679 body = gimple_body (current_function_decl);
6680 scan_omp (body, NULL);
6681 gcc_assert (taskreg_nesting_level == 0);
6682
6683 if (all_contexts->root)
6684 {
6685 struct gimplify_ctx gctx;
6686
6687 if (task_shared_vars)
6688 push_gimplify_context (&gctx);
6689 lower_omp (body, NULL);
6690 if (task_shared_vars)
6691 pop_gimplify_context (NULL);
6692 }
6693
6694 if (all_contexts)
6695 {
6696 splay_tree_delete (all_contexts);
6697 all_contexts = NULL;
6698 }
6699 BITMAP_FREE (task_shared_vars);
6700 return 0;
6701 }
6702
6703 struct gimple_opt_pass pass_lower_omp =
6704 {
6705 {
6706 GIMPLE_PASS,
6707 "omplower", /* name */
6708 NULL, /* gate */
6709 execute_lower_omp, /* execute */
6710 NULL, /* sub */
6711 NULL, /* next */
6712 0, /* static_pass_number */
6713 TV_NONE, /* tv_id */
6714 PROP_gimple_any, /* properties_required */
6715 PROP_gimple_lomp, /* properties_provided */
6716 0, /* properties_destroyed */
6717 0, /* todo_flags_start */
6718 TODO_dump_func /* todo_flags_finish */
6719 }
6720 };
6721 \f
6722 /* The following is a utility to diagnose OpenMP structured block violations.
6723 It is not part of the "omplower" pass, as that's invoked too late. It
6724 should be invoked by the respective front ends after gimplification. */
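/* For instance (a sketch), the checks below reject a jump such as

          goto l;                       <- diagnosed as an invalid entry
        #pragma omp parallel
          {
        l:  ;
          }

   because the branch and its target label belong to different OpenMP
   structured-block contexts.  */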
6725
6726 static splay_tree all_labels;
6727
6728 /* Check for mismatched contexts and generate an error if needed. Return
6729 true if an error is detected. */
6730
6731 static bool
6732 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6733 gimple branch_ctx, gimple label_ctx)
6734 {
6735 if (label_ctx == branch_ctx)
6736 return false;
6737
6738
6739 /*
6740 Previously we kept track of the label's entire context in diagnose_sb_[12]
6741 so we could traverse it and issue a correct "exit" or "enter" error
6742 message upon a structured block violation.
6743
6744 We built the context by building a list with tree_cons'ing, but there is
6745 no easy counterpart in gimple tuples. It seems like far too much work
6746 for issuing exit/enter error messages. If someone really misses the
6747 distinct error message... patches welcome.
6748 */
6749
6750 #if 0
6751 /* Try to avoid confusing the user by producing an error message
6752 with correct "exit" or "enter" verbiage. We prefer "exit"
6753 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6754 if (branch_ctx == NULL)
6755 exit_p = false;
6756 else
6757 {
6758 while (label_ctx)
6759 {
6760 if (TREE_VALUE (label_ctx) == branch_ctx)
6761 {
6762 exit_p = false;
6763 break;
6764 }
6765 label_ctx = TREE_CHAIN (label_ctx);
6766 }
6767 }
6768
6769 if (exit_p)
6770 error ("invalid exit from OpenMP structured block");
6771 else
6772 error ("invalid entry to OpenMP structured block");
6773 #endif
6774
6775 /* If it's obvious we have an invalid entry, be specific about the error. */
6776 if (branch_ctx == NULL)
6777 error ("invalid entry to OpenMP structured block");
6778 else
6779 /* Otherwise, be vague and lazy, but efficient. */
6780 error ("invalid branch to/from an OpenMP structured block");
6781
6782 gsi_replace (gsi_p, gimple_build_nop (), false);
6783 return true;
6784 }
6785
6786 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6787 where each label is found. */
6788
6789 static tree
6790 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6791 struct walk_stmt_info *wi)
6792 {
6793 gimple context = (gimple) wi->info;
6794 gimple inner_context;
6795 gimple stmt = gsi_stmt (*gsi_p);
6796
6797 *handled_ops_p = true;
6798
6799 switch (gimple_code (stmt))
6800 {
6801 WALK_SUBSTMTS;
6802
6803 case GIMPLE_OMP_PARALLEL:
6804 case GIMPLE_OMP_TASK:
6805 case GIMPLE_OMP_SECTIONS:
6806 case GIMPLE_OMP_SINGLE:
6807 case GIMPLE_OMP_SECTION:
6808 case GIMPLE_OMP_MASTER:
6809 case GIMPLE_OMP_ORDERED:
6810 case GIMPLE_OMP_CRITICAL:
6811 /* The minimal context here is just the current OMP construct. */
6812 inner_context = stmt;
6813 wi->info = inner_context;
6814 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6815 wi->info = context;
6816 break;
6817
6818 case GIMPLE_OMP_FOR:
6819 inner_context = stmt;
6820 wi->info = inner_context;
6821 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6822 walk them. */
6823 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6824 diagnose_sb_1, NULL, wi);
6825 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6826 wi->info = context;
6827 break;
6828
6829 case GIMPLE_LABEL:
6830 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6831 (splay_tree_value) context);
6832 break;
6833
6834 default:
6835 break;
6836 }
6837
6838 return NULL_TREE;
6839 }
6840
6841 /* Pass 2: Check each branch and see if its context differs from
6842 the context of the destination label. */
6843
6844 static tree
6845 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6846 struct walk_stmt_info *wi)
6847 {
6848 gimple context = (gimple) wi->info;
6849 splay_tree_node n;
6850 gimple stmt = gsi_stmt (*gsi_p);
6851
6852 *handled_ops_p = true;
6853
6854 switch (gimple_code (stmt))
6855 {
6856 WALK_SUBSTMTS;
6857
6858 case GIMPLE_OMP_PARALLEL:
6859 case GIMPLE_OMP_TASK:
6860 case GIMPLE_OMP_SECTIONS:
6861 case GIMPLE_OMP_SINGLE:
6862 case GIMPLE_OMP_SECTION:
6863 case GIMPLE_OMP_MASTER:
6864 case GIMPLE_OMP_ORDERED:
6865 case GIMPLE_OMP_CRITICAL:
6866 wi->info = stmt;
6867 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6868 wi->info = context;
6869 break;
6870
6871 case GIMPLE_OMP_FOR:
6872 wi->info = stmt;
6873 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6874 walk them. */
6875 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6876 diagnose_sb_2, NULL, wi);
6877 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6878 wi->info = context;
6879 break;
6880
6881 case GIMPLE_COND:
6882 {
6883 tree lab = gimple_cond_true_label (stmt);
6884 if (lab)
6885 {
6886 n = splay_tree_lookup (all_labels,
6887 (splay_tree_key) lab);
6888 diagnose_sb_0 (gsi_p, context,
6889 n ? (gimple) n->value : NULL);
6890 }
6891 lab = gimple_cond_false_label (stmt);
6892 if (lab)
6893 {
6894 n = splay_tree_lookup (all_labels,
6895 (splay_tree_key) lab);
6896 diagnose_sb_0 (gsi_p, context,
6897 n ? (gimple) n->value : NULL);
6898 }
6899 }
6900 break;
6901
6902 case GIMPLE_GOTO:
6903 {
6904 tree lab = gimple_goto_dest (stmt);
6905 if (TREE_CODE (lab) != LABEL_DECL)
6906 break;
6907
6908 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6909 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
6910 }
6911 break;
6912
6913 case GIMPLE_SWITCH:
6914 {
6915 unsigned int i;
6916 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
6917 {
6918 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
6919 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6920 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
6921 break;
6922 }
6923 }
6924 break;
6925
6926 case GIMPLE_RETURN:
6927 diagnose_sb_0 (gsi_p, context, NULL);
6928 break;
6929
6930 default:
6931 break;
6932 }
6933
6934 return NULL_TREE;
6935 }
6936
6937 static unsigned int
6938 diagnose_omp_structured_block_errors (void)
6939 {
6940 struct walk_stmt_info wi;
6941 gimple_seq body = gimple_body (current_function_decl);
6942
6943 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6944
6945 memset (&wi, 0, sizeof (wi));
6946 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
6947
6948 memset (&wi, 0, sizeof (wi));
6949 wi.want_locations = true;
6950 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
6951
6952 splay_tree_delete (all_labels);
6953 all_labels = NULL;
6954
6955 return 0;
6956 }
6957
6958 static bool
6959 gate_diagnose_omp_blocks (void)
6960 {
6961 return flag_openmp != 0;
6962 }
6963
6964 struct gimple_opt_pass pass_diagnose_omp_blocks =
6965 {
6966 {
6967 GIMPLE_PASS,
6968 "*diagnose_omp_blocks", /* name */
6969 gate_diagnose_omp_blocks, /* gate */
6970 diagnose_omp_structured_block_errors, /* execute */
6971 NULL, /* sub */
6972 NULL, /* next */
6973 0, /* static_pass_number */
6974 TV_NONE, /* tv_id */
6975 PROP_gimple_any, /* properties_required */
6976 0, /* properties_provided */
6977 0, /* properties_destroyed */
6978 0, /* todo_flags_start */
6979 0, /* todo_flags_finish */
6980 }
6981 };
6982
6983 #include "gt-omp-low.h"