1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
7 Free Software Foundation, Inc.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "gimple.h"
32 #include "tree-iterator.h"
33 #include "tree-inline.h"
34 #include "langhooks.h"
35 #include "diagnostic-core.h"
36 #include "tree-flow.h"
37 #include "timevar.h"
38 #include "flags.h"
39 #include "function.h"
40 #include "expr.h"
41 #include "toplev.h"
42 #include "tree-pass.h"
43 #include "ggc.h"
44 #include "except.h"
45 #include "splay-tree.h"
46 #include "optabs.h"
47 #include "cfgloop.h"
48
49
50 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
51 phases. The first phase scans the function looking for OMP statements
52 and then for variables that must be replaced to satisfy data sharing
53 clauses. The second phase expands code for the constructs, as well as
54 re-gimplifying things when variables have been replaced with complex
55 expressions.
56
57 Final code generation is done by pass_expand_omp. The flowgraph is
58 scanned for parallel regions which are then moved to a new
59 function, to be invoked by the thread library. */
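/* As a purely illustrative sketch (names such as foo._omp_fn.0 and
   .omp_data_o are placeholders; the exact libgomp call sequence is
   chosen later by pass_expand_omp), a construct like

       #pragma omp parallel shared (x)
         x++;

   is outlined into a child function that receives the shared data
   through a generated struct, roughly:

       .omp_data_o.x = x;
       GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
       foo._omp_fn.0 (&.omp_data_o);
       GOMP_parallel_end ();
       x = .omp_data_o.x;  */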
60
61 /* Context structure. Used to store information about each parallel
62 directive in the code. */
63
64 typedef struct omp_context
65 {
66 /* This field must be at the beginning, as we do "inheritance": Some
67 callback functions for tree-inline.c (e.g., omp_copy_decl)
68 receive a copy_body_data pointer that is up-casted to an
69 omp_context pointer. */
70 copy_body_data cb;
71
72 /* The tree of contexts corresponding to the encountered constructs. */
73 struct omp_context *outer;
74 gimple stmt;
75
76 /* Map variables to fields in a structure that allows communication
77 between sending and receiving threads. */
78 splay_tree field_map;
79 tree record_type;
80 tree sender_decl;
81 tree receiver_decl;
82
83 /* These are used just by task contexts, if task firstprivate fn is
84 needed. srecord_type is used to communicate from the thread
85 that encountered the task construct to task firstprivate fn;
86 record_type is allocated by GOMP_task, initialized by task firstprivate
87 fn and passed to the task body fn. */
88 splay_tree sfield_map;
89 tree srecord_type;
90
91 /* A chain of variables to add to the top-level block surrounding the
92 construct. In the case of a parallel, this is in the child function. */
93 tree block_vars;
94
95 /* What to do with variables with implicitly determined sharing
96 attributes. */
97 enum omp_clause_default_kind default_kind;
98
99 /* Nesting depth of this context. Used to beautify error messages re
100 invalid gotos. The outermost ctx is depth 1, with depth 0 being
101 reserved for the main body of the function. */
102 int depth;
103
104 /* True if this parallel directive is nested within another. */
105 bool is_nested;
106 } omp_context;
107
108
109 struct omp_for_data_loop
110 {
111 tree v, n1, n2, step;
112 enum tree_code cond_code;
113 };
114
115 /* A structure describing the main elements of a parallel loop. */
116
117 struct omp_for_data
118 {
119 struct omp_for_data_loop loop;
120 tree chunk_size;
121 gimple for_stmt;
122 tree pre, iter_type;
123 int collapse;
124 bool have_nowait, have_ordered;
125 enum omp_clause_schedule_kind sched_kind;
126 struct omp_for_data_loop *loops;
127 };
128
129
130 static splay_tree all_contexts;
131 static int taskreg_nesting_level;
132 struct omp_region *root_omp_region;
133 static bitmap task_shared_vars;
134
135 static void scan_omp (gimple_seq, omp_context *);
136 static tree scan_omp_1_op (tree *, int *, void *);
137
138 #define WALK_SUBSTMTS \
139 case GIMPLE_BIND: \
140 case GIMPLE_TRY: \
141 case GIMPLE_CATCH: \
142 case GIMPLE_EH_FILTER: \
143 /* The sub-statements for these should be walked. */ \
144 *handled_ops_p = false; \
145 break;
146
147 /* Convenience function for calling scan_omp_1_op on tree operands. */
148
149 static inline tree
150 scan_omp_op (tree *tp, omp_context *ctx)
151 {
152 struct walk_stmt_info wi;
153
154 memset (&wi, 0, sizeof (wi));
155 wi.info = ctx;
156 wi.want_locations = true;
157
158 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
159 }
160
161 static void lower_omp (gimple_seq, omp_context *);
162 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
163 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
164
165 /* Find an OpenMP clause of type KIND within CLAUSES. */
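/* For example,

       find_omp_clause (gimple_omp_for_clauses (stmt), OMP_CLAUSE_ORDERED)

   returns the ordered clause of a loop directive if one is present,
   and NULL_TREE otherwise.  */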
166
167 tree
168 find_omp_clause (tree clauses, enum omp_clause_code kind)
169 {
170 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
171 if (OMP_CLAUSE_CODE (clauses) == kind)
172 return clauses;
173
174 return NULL_TREE;
175 }
176
177 /* Return true if CTX is for an omp parallel. */
178
179 static inline bool
180 is_parallel_ctx (omp_context *ctx)
181 {
182 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
183 }
184
185
186 /* Return true if CTX is for an omp task. */
187
188 static inline bool
189 is_task_ctx (omp_context *ctx)
190 {
191 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
192 }
193
194
195 /* Return true if CTX is for an omp parallel or omp task. */
196
197 static inline bool
198 is_taskreg_ctx (omp_context *ctx)
199 {
200 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
201 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
202 }
203
204
205 /* Return true if REGION is a combined parallel+workshare region. */
206
207 static inline bool
208 is_combined_parallel (struct omp_region *region)
209 {
210 return region->is_combined_parallel;
211 }
212
213
214 /* Extract the header elements of parallel loop FOR_STMT and store
215 them into *FD. */
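/* For illustration (a hypothetical loop nest; the details follow from
   the code below), given

       #pragma omp for collapse (2)
       for (i = 0; i < N; i++)
         for (j = 0; j < M; j++)
           ...

   FD->collapse is 2, FD->loops[0] and FD->loops[1] describe the i and j
   loops (v, n1, n2, step, cond_code), and FD->loop describes the single
   collapsed iteration space, counting from 0 up to the total iteration
   count (N * M when both bounds are compile-time constants).  */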
216
217 static void
218 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
219 struct omp_for_data_loop *loops)
220 {
221 tree t, var, *collapse_iter, *collapse_count;
222 tree count = NULL_TREE, iter_type = long_integer_type_node;
223 struct omp_for_data_loop *loop;
224 int i;
225 struct omp_for_data_loop dummy_loop;
226 location_t loc = gimple_location (for_stmt);
227
228 fd->for_stmt = for_stmt;
229 fd->pre = NULL;
230 fd->collapse = gimple_omp_for_collapse (for_stmt);
231 if (fd->collapse > 1)
232 fd->loops = loops;
233 else
234 fd->loops = &fd->loop;
235
236 fd->have_nowait = fd->have_ordered = false;
237 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
238 fd->chunk_size = NULL_TREE;
239 collapse_iter = NULL;
240 collapse_count = NULL;
241
242 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
243 switch (OMP_CLAUSE_CODE (t))
244 {
245 case OMP_CLAUSE_NOWAIT:
246 fd->have_nowait = true;
247 break;
248 case OMP_CLAUSE_ORDERED:
249 fd->have_ordered = true;
250 break;
251 case OMP_CLAUSE_SCHEDULE:
252 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
253 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
254 break;
255 case OMP_CLAUSE_COLLAPSE:
256 if (fd->collapse > 1)
257 {
258 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
259 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
260 }
261 default:
262 break;
263 }
264
265 /* FIXME: for now map schedule(auto) to schedule(static).
266 There should be analysis to determine whether all iterations
267 are approximately the same amount of work (then schedule(static)
268 is best) or if it varies (then schedule(dynamic,N) is better). */
269 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
270 {
271 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
272 gcc_assert (fd->chunk_size == NULL);
273 }
274 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
275 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
276 gcc_assert (fd->chunk_size == NULL);
277 else if (fd->chunk_size == NULL)
278 {
279 /* We only need to compute a default chunk size for ordered
280 static loops and dynamic loops. */
281 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
282 || fd->have_ordered
283 || fd->collapse > 1)
284 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
285 ? integer_zero_node : integer_one_node;
286 }
287
288 for (i = 0; i < fd->collapse; i++)
289 {
290 if (fd->collapse == 1)
291 loop = &fd->loop;
292 else if (loops != NULL)
293 loop = loops + i;
294 else
295 loop = &dummy_loop;
296
297
298 loop->v = gimple_omp_for_index (for_stmt, i);
299 gcc_assert (SSA_VAR_P (loop->v));
300 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
301 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
302 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
303 loop->n1 = gimple_omp_for_initial (for_stmt, i);
304
305 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
306 loop->n2 = gimple_omp_for_final (for_stmt, i);
307 switch (loop->cond_code)
308 {
309 case LT_EXPR:
310 case GT_EXPR:
311 break;
312 case LE_EXPR:
313 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
314 loop->n2 = fold_build2_loc (loc,
315 POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
316 loop->n2, size_one_node);
317 else
318 loop->n2 = fold_build2_loc (loc,
319 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
320 build_int_cst (TREE_TYPE (loop->n2), 1));
321 loop->cond_code = LT_EXPR;
322 break;
323 case GE_EXPR:
324 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
325 loop->n2 = fold_build2_loc (loc,
326 POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
327 loop->n2, size_int (-1));
328 else
329 loop->n2 = fold_build2_loc (loc,
330 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
331 build_int_cst (TREE_TYPE (loop->n2), 1));
332 loop->cond_code = GT_EXPR;
333 break;
334 default:
335 gcc_unreachable ();
336 }
337
338 t = gimple_omp_for_incr (for_stmt, i);
339 gcc_assert (TREE_OPERAND (t, 0) == var);
340 switch (TREE_CODE (t))
341 {
342 case PLUS_EXPR:
343 case POINTER_PLUS_EXPR:
344 loop->step = TREE_OPERAND (t, 1);
345 break;
346 case MINUS_EXPR:
347 loop->step = TREE_OPERAND (t, 1);
348 loop->step = fold_build1_loc (loc,
349 NEGATE_EXPR, TREE_TYPE (loop->step),
350 loop->step);
351 break;
352 default:
353 gcc_unreachable ();
354 }
355
356 if (iter_type != long_long_unsigned_type_node)
357 {
358 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
359 iter_type = long_long_unsigned_type_node;
360 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
361 && TYPE_PRECISION (TREE_TYPE (loop->v))
362 >= TYPE_PRECISION (iter_type))
363 {
364 tree n;
365
366 if (loop->cond_code == LT_EXPR)
367 n = fold_build2_loc (loc,
368 PLUS_EXPR, TREE_TYPE (loop->v),
369 loop->n2, loop->step);
370 else
371 n = loop->n1;
372 if (TREE_CODE (n) != INTEGER_CST
373 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
374 iter_type = long_long_unsigned_type_node;
375 }
376 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
377 > TYPE_PRECISION (iter_type))
378 {
379 tree n1, n2;
380
381 if (loop->cond_code == LT_EXPR)
382 {
383 n1 = loop->n1;
384 n2 = fold_build2_loc (loc,
385 PLUS_EXPR, TREE_TYPE (loop->v),
386 loop->n2, loop->step);
387 }
388 else
389 {
390 n1 = fold_build2_loc (loc,
391 MINUS_EXPR, TREE_TYPE (loop->v),
392 loop->n2, loop->step);
393 n2 = loop->n1;
394 }
395 if (TREE_CODE (n1) != INTEGER_CST
396 || TREE_CODE (n2) != INTEGER_CST
397 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
398 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
399 iter_type = long_long_unsigned_type_node;
400 }
401 }
402
403 if (collapse_count && *collapse_count == NULL)
404 {
405 if ((i == 0 || count != NULL_TREE)
406 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
407 && TREE_CONSTANT (loop->n1)
408 && TREE_CONSTANT (loop->n2)
409 && TREE_CODE (loop->step) == INTEGER_CST)
410 {
411 tree itype = TREE_TYPE (loop->v);
412
413 if (POINTER_TYPE_P (itype))
414 itype
415 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
416 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
417 t = fold_build2_loc (loc,
418 PLUS_EXPR, itype,
419 fold_convert_loc (loc, itype, loop->step), t);
420 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
421 fold_convert_loc (loc, itype, loop->n2));
422 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
423 fold_convert_loc (loc, itype, loop->n1));
424 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
425 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
426 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
427 fold_build1_loc (loc, NEGATE_EXPR, itype,
428 fold_convert_loc (loc, itype,
429 loop->step)));
430 else
431 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
432 fold_convert_loc (loc, itype, loop->step));
433 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
434 if (count != NULL_TREE)
435 count = fold_build2_loc (loc,
436 MULT_EXPR, long_long_unsigned_type_node,
437 count, t);
438 else
439 count = t;
440 if (TREE_CODE (count) != INTEGER_CST)
441 count = NULL_TREE;
442 }
443 else
444 count = NULL_TREE;
445 }
446 }
447
448 if (count)
449 {
450 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
451 iter_type = long_long_unsigned_type_node;
452 else
453 iter_type = long_integer_type_node;
454 }
455 else if (collapse_iter && *collapse_iter != NULL)
456 iter_type = TREE_TYPE (*collapse_iter);
457 fd->iter_type = iter_type;
458 if (collapse_iter && *collapse_iter == NULL)
459 *collapse_iter = create_tmp_var (iter_type, ".iter");
460 if (collapse_count && *collapse_count == NULL)
461 {
462 if (count)
463 *collapse_count = fold_convert_loc (loc, iter_type, count);
464 else
465 *collapse_count = create_tmp_var (iter_type, ".count");
466 }
467
468 if (fd->collapse > 1)
469 {
470 fd->loop.v = *collapse_iter;
471 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
472 fd->loop.n2 = *collapse_count;
473 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
474 fd->loop.cond_code = LT_EXPR;
475 }
476 }
477
478
479 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
480 is the immediate dominator of PAR_ENTRY_BB, return true if there
481 are no data dependencies that would prevent expanding the parallel
482 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
483
484 When expanding a combined parallel+workshare region, the call to
485 the child function may need additional arguments in the case of
486 GIMPLE_OMP_FOR regions. In some cases, these arguments are
487 computed out of variables passed in from the parent to the child
488 via 'struct .omp_data_s'. For instance:
489
490 #pragma omp parallel for schedule (guided, i * 4)
491 for (j ...)
492
493 Is lowered into:
494
495 # BLOCK 2 (PAR_ENTRY_BB)
496 .omp_data_o.i = i;
497 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
498
499 # BLOCK 3 (WS_ENTRY_BB)
500 .omp_data_i = &.omp_data_o;
501 D.1667 = .omp_data_i->i;
502 D.1598 = D.1667 * 4;
503 #pragma omp for schedule (guided, D.1598)
504
505 When we outline the parallel region, the call to the child function
506 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
507 that value is computed *after* the call site. So, in principle we
508 cannot do the transformation.
509
510 To see whether the code in WS_ENTRY_BB blocks the combined
511 parallel+workshare call, we collect all the variables used in the
512 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
513 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
514 call.
515
516 FIXME. If we had the SSA form built at this point, we could merely
517 hoist the code in block 3 into block 2 and be done with it. But at
518 this point we don't have dataflow information and though we could
519 hack something up here, it is really not worth the aggravation. */
520
521 static bool
522 workshare_safe_to_combine_p (basic_block ws_entry_bb)
523 {
524 struct omp_for_data fd;
525 gimple ws_stmt = last_stmt (ws_entry_bb);
526
527 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
528 return true;
529
530 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
531
532 extract_omp_for_data (ws_stmt, &fd, NULL);
533
534 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
535 return false;
536 if (fd.iter_type != long_integer_type_node)
537 return false;
538
539 /* FIXME. We give up too easily here. If any of these arguments
540 are not constants, they will likely involve variables that have
541 been mapped into fields of .omp_data_s for sharing with the child
542 function. With appropriate data flow, it would be possible to
543 see through this. */
544 if (!is_gimple_min_invariant (fd.loop.n1)
545 || !is_gimple_min_invariant (fd.loop.n2)
546 || !is_gimple_min_invariant (fd.loop.step)
547 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
548 return false;
549
550 return true;
551 }
552
553
554 /* Collect additional arguments needed to emit a combined
555 parallel+workshare call. WS_STMT is the workshare directive being
556 expanded. */
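/* For a GIMPLE_OMP_FOR the resulting list is, in order, the loop bounds
   and step (N1, N2, STEP), followed by the chunk size if the schedule
   has one; for a GIMPLE_OMP_SECTIONS it is just the section count.
   These become the extra arguments of the combined library entry points
   (e.g. GOMP_parallel_loop_dynamic_start) emitted during expansion.  */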
557
558 static tree
559 get_ws_args_for (gimple ws_stmt)
560 {
561 tree t;
562 location_t loc = gimple_location (ws_stmt);
563
564 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
565 {
566 struct omp_for_data fd;
567 tree ws_args;
568
569 extract_omp_for_data (ws_stmt, &fd, NULL);
570
571 ws_args = NULL_TREE;
572 if (fd.chunk_size)
573 {
574 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
575 ws_args = tree_cons (NULL, t, ws_args);
576 }
577
578 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
579 ws_args = tree_cons (NULL, t, ws_args);
580
581 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
582 ws_args = tree_cons (NULL, t, ws_args);
583
584 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
585 ws_args = tree_cons (NULL, t, ws_args);
586
587 return ws_args;
588 }
589 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
590 {
591 /* Number of sections is equal to the number of edges from the
592 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
593 the exit of the sections region. */
594 basic_block bb = single_succ (gimple_bb (ws_stmt));
595 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
596 t = tree_cons (NULL, t, NULL);
597 return t;
598 }
599
600 gcc_unreachable ();
601 }
602
603
604 /* Discover whether REGION is a combined parallel+workshare region. */
605
606 static void
607 determine_parallel_type (struct omp_region *region)
608 {
609 basic_block par_entry_bb, par_exit_bb;
610 basic_block ws_entry_bb, ws_exit_bb;
611
612 if (region == NULL || region->inner == NULL
613 || region->exit == NULL || region->inner->exit == NULL
614 || region->inner->cont == NULL)
615 return;
616
617 /* We only support parallel+for and parallel+sections. */
618 if (region->type != GIMPLE_OMP_PARALLEL
619 || (region->inner->type != GIMPLE_OMP_FOR
620 && region->inner->type != GIMPLE_OMP_SECTIONS))
621 return;
622
623 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
624 WS_EXIT_BB -> PAR_EXIT_BB. */
625 par_entry_bb = region->entry;
626 par_exit_bb = region->exit;
627 ws_entry_bb = region->inner->entry;
628 ws_exit_bb = region->inner->exit;
629
630 if (single_succ (par_entry_bb) == ws_entry_bb
631 && single_succ (ws_exit_bb) == par_exit_bb
632 && workshare_safe_to_combine_p (ws_entry_bb)
633 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
634 || (last_and_only_stmt (ws_entry_bb)
635 && last_and_only_stmt (par_exit_bb))))
636 {
637 gimple ws_stmt = last_stmt (ws_entry_bb);
638
639 if (region->inner->type == GIMPLE_OMP_FOR)
640 {
641 /* If this is a combined parallel loop, we need to determine
642 whether or not to use the combined library calls. There
643 are two cases where we do not apply the transformation:
644 static loops and any kind of ordered loop. In the first
645 case, we already open code the loop so there is no need
646 to do anything else. In the latter case, the combined
647 parallel loop call would still need extra synchronization
648 to implement ordered semantics, so there would not be any
649 gain in using the combined call. */
650 tree clauses = gimple_omp_for_clauses (ws_stmt);
651 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
652 if (c == NULL
653 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
654 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
655 {
656 region->is_combined_parallel = false;
657 region->inner->is_combined_parallel = false;
658 return;
659 }
660 }
661
662 region->is_combined_parallel = true;
663 region->inner->is_combined_parallel = true;
664 region->ws_args = get_ws_args_for (ws_stmt);
665 }
666 }
667
668
669 /* Return true if EXPR is variable sized. */
670
671 static inline bool
672 is_variable_sized (const_tree expr)
673 {
674 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
675 }
676
677 /* Return true if DECL is a reference type. */
678
679 static inline bool
680 is_reference (tree decl)
681 {
682 return lang_hooks.decls.omp_privatize_by_reference (decl);
683 }
684
685 /* Lookup variables in the decl or field splay trees. The "maybe" form
686 allows the variable not to have been entered; otherwise we
687 assert that the variable has been entered. */
688
689 static inline tree
690 lookup_decl (tree var, omp_context *ctx)
691 {
692 tree *n;
693 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
694 return *n;
695 }
696
697 static inline tree
698 maybe_lookup_decl (const_tree var, omp_context *ctx)
699 {
700 tree *n;
701 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
702 return n ? *n : NULL_TREE;
703 }
704
705 static inline tree
706 lookup_field (tree var, omp_context *ctx)
707 {
708 splay_tree_node n;
709 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
710 return (tree) n->value;
711 }
712
713 static inline tree
714 lookup_sfield (tree var, omp_context *ctx)
715 {
716 splay_tree_node n;
717 n = splay_tree_lookup (ctx->sfield_map
718 ? ctx->sfield_map : ctx->field_map,
719 (splay_tree_key) var);
720 return (tree) n->value;
721 }
722
723 static inline tree
724 maybe_lookup_field (tree var, omp_context *ctx)
725 {
726 splay_tree_node n;
727 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
728 return n ? (tree) n->value : NULL_TREE;
729 }
730
731 /* Return true if DECL should be copied by pointer. SHARED_CTX is
732 the parallel context if DECL is to be shared. */
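/* Roughly: aggregates, variables whose address is already taken, and
   variables that are shared with an outer parallel or with a task must
   be passed by reference; only sufficiently private scalars are safe
   for copy-in/copy-out.  */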
733
734 static bool
735 use_pointer_for_field (tree decl, omp_context *shared_ctx)
736 {
737 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
738 return true;
739
740 /* We can only use copy-in/copy-out semantics for shared variables
741 when we know the value is not accessible from an outer scope. */
742 if (shared_ctx)
743 {
744 /* ??? Trivially accessible from anywhere. But why would we even
745 be passing an address in this case? Should we simply assert
746 this to be false, or should we have a cleanup pass that removes
747 these from the list of mappings? */
748 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
749 return true;
750
751 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
752 without analyzing the expression whether or not its location
753 is accessible to anyone else. In the case of nested parallel
754 regions it certainly may be. */
755 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
756 return true;
757
758 /* Do not use copy-in/copy-out for variables that have their
759 address taken. */
760 if (TREE_ADDRESSABLE (decl))
761 return true;
762
763 /* Disallow copy-in/out in nested parallel if
764 decl is shared in outer parallel, otherwise
765 each thread could store the shared variable
766 in its own copy-in location, making the
767 variable no longer really shared. */
768 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
769 {
770 omp_context *up;
771
772 for (up = shared_ctx->outer; up; up = up->outer)
773 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
774 break;
775
776 if (up)
777 {
778 tree c;
779
780 for (c = gimple_omp_taskreg_clauses (up->stmt);
781 c; c = OMP_CLAUSE_CHAIN (c))
782 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
783 && OMP_CLAUSE_DECL (c) == decl)
784 break;
785
786 if (c)
787 return true;
788 }
789 }
790
791 /* For tasks avoid using copy-in/out, unless they are readonly
792 (in which case just copy-in is used). As tasks can be
793 deferred or executed in a different thread, when GOMP_task
794 returns, the task hasn't necessarily terminated. */
795 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
796 {
797 tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
798 if (is_gimple_reg (outer))
799 {
800 /* Taking address of OUTER in lower_send_shared_vars
801 might need regimplification of everything that uses the
802 variable. */
803 if (!task_shared_vars)
804 task_shared_vars = BITMAP_ALLOC (NULL);
805 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
806 TREE_ADDRESSABLE (outer) = 1;
807 }
808 return true;
809 }
810 }
811
812 return false;
813 }
814
815 /* Create a new VAR_DECL and copy information from VAR to it. */
816
817 tree
818 copy_var_decl (tree var, tree name, tree type)
819 {
820 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
821
822 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
823 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
824 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
825 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
826 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
827 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
828 TREE_USED (copy) = 1;
829 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
830
831 return copy;
832 }
833
834 /* Construct a new automatic decl similar to VAR. */
835
836 static tree
837 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
838 {
839 tree copy = copy_var_decl (var, name, type);
840
841 DECL_CONTEXT (copy) = current_function_decl;
842 TREE_CHAIN (copy) = ctx->block_vars;
843 ctx->block_vars = copy;
844
845 return copy;
846 }
847
848 static tree
849 omp_copy_decl_1 (tree var, omp_context *ctx)
850 {
851 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
852 }
853
854 /* Build tree nodes to access the field for VAR on the receiver side. */
855
856 static tree
857 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
858 {
859 tree x, field = lookup_field (var, ctx);
860
861 /* If the receiver record type was remapped in the child function,
862 remap the field into the new record type. */
863 x = maybe_lookup_field (field, ctx);
864 if (x != NULL)
865 field = x;
866
867 x = build_simple_mem_ref (ctx->receiver_decl);
868 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
869 if (by_ref)
870 x = build_simple_mem_ref (x);
871
872 return x;
873 }
874
875 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
876 of a parallel, this is a component reference; for workshare constructs
877 this is some variable. */
878
879 static tree
880 build_outer_var_ref (tree var, omp_context *ctx)
881 {
882 tree x;
883
884 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
885 x = var;
886 else if (is_variable_sized (var))
887 {
888 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
889 x = build_outer_var_ref (x, ctx);
890 x = build_simple_mem_ref (x);
891 }
892 else if (is_taskreg_ctx (ctx))
893 {
894 bool by_ref = use_pointer_for_field (var, NULL);
895 x = build_receiver_ref (var, by_ref, ctx);
896 }
897 else if (ctx->outer)
898 x = lookup_decl (var, ctx->outer);
899 else if (is_reference (var))
900 /* This can happen with orphaned constructs. If var is reference, it is
901 possible it is shared and as such valid. */
902 x = var;
903 else
904 gcc_unreachable ();
905
906 if (is_reference (var))
907 x = build_simple_mem_ref (x);
908
909 return x;
910 }
911
912 /* Build tree nodes to access the field for VAR on the sender side. */
913
914 static tree
915 build_sender_ref (tree var, omp_context *ctx)
916 {
917 tree field = lookup_sfield (var, ctx);
918 return build3 (COMPONENT_REF, TREE_TYPE (field),
919 ctx->sender_decl, field, NULL);
920 }
921
922 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
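/* MASK selects where the field goes: bit 0 requests a field in
   CTX->RECORD_TYPE (looked up via CTX->FIELD_MAP), bit 1 requests one
   in CTX->SRECORD_TYPE (via CTX->SFIELD_MAP), and 3 requests both.  */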
923
924 static void
925 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
926 {
927 tree field, type, sfield = NULL_TREE;
928
929 gcc_assert ((mask & 1) == 0
930 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
931 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
932 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
933
934 type = TREE_TYPE (var);
935 if (by_ref)
936 type = build_pointer_type (type);
937 else if ((mask & 3) == 1 && is_reference (var))
938 type = TREE_TYPE (type);
939
940 field = build_decl (DECL_SOURCE_LOCATION (var),
941 FIELD_DECL, DECL_NAME (var), type);
942
943 /* Remember what variable this field was created for. This does have a
944 side effect of making dwarf2out ignore this member, so to keep the
945 debug info useful we clear it later in delete_omp_context. */
946 DECL_ABSTRACT_ORIGIN (field) = var;
947 if (type == TREE_TYPE (var))
948 {
949 DECL_ALIGN (field) = DECL_ALIGN (var);
950 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
951 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
952 }
953 else
954 DECL_ALIGN (field) = TYPE_ALIGN (type);
955
956 if ((mask & 3) == 3)
957 {
958 insert_field_into_struct (ctx->record_type, field);
959 if (ctx->srecord_type)
960 {
961 sfield = build_decl (DECL_SOURCE_LOCATION (var),
962 FIELD_DECL, DECL_NAME (var), type);
963 DECL_ABSTRACT_ORIGIN (sfield) = var;
964 DECL_ALIGN (sfield) = DECL_ALIGN (field);
965 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
966 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
967 insert_field_into_struct (ctx->srecord_type, sfield);
968 }
969 }
970 else
971 {
972 if (ctx->srecord_type == NULL_TREE)
973 {
974 tree t;
975
976 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
977 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
978 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
979 {
980 sfield = build_decl (DECL_SOURCE_LOCATION (var),
981 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
982 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
983 insert_field_into_struct (ctx->srecord_type, sfield);
984 splay_tree_insert (ctx->sfield_map,
985 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
986 (splay_tree_value) sfield);
987 }
988 }
989 sfield = field;
990 insert_field_into_struct ((mask & 1) ? ctx->record_type
991 : ctx->srecord_type, field);
992 }
993
994 if (mask & 1)
995 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
996 (splay_tree_value) field);
997 if ((mask & 2) && ctx->sfield_map)
998 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
999 (splay_tree_value) sfield);
1000 }
1001
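/* Create a local replacement for VAR for use inside the construct
   described by CTX, record the mapping in CTX->CB.DECL_MAP and return
   the new decl.  */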
1002 static tree
1003 install_var_local (tree var, omp_context *ctx)
1004 {
1005 tree new_var = omp_copy_decl_1 (var, ctx);
1006 insert_decl_map (&ctx->cb, var, new_var);
1007 return new_var;
1008 }
1009
1010 /* Adjust the replacement for DECL in CTX for the new context. This means
1011 copying the DECL_VALUE_EXPR, and fixing up the type. */
1012
1013 static void
1014 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1015 {
1016 tree new_decl, size;
1017
1018 new_decl = lookup_decl (decl, ctx);
1019
1020 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1021
1022 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1023 && DECL_HAS_VALUE_EXPR_P (decl))
1024 {
1025 tree ve = DECL_VALUE_EXPR (decl);
1026 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1027 SET_DECL_VALUE_EXPR (new_decl, ve);
1028 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1029 }
1030
1031 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1032 {
1033 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1034 if (size == error_mark_node)
1035 size = TYPE_SIZE (TREE_TYPE (new_decl));
1036 DECL_SIZE (new_decl) = size;
1037
1038 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1039 if (size == error_mark_node)
1040 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1041 DECL_SIZE_UNIT (new_decl) = size;
1042 }
1043 }
1044
1045 /* The callback for remap_decl. Search all containing contexts for a
1046 mapping of the variable; this avoids having to duplicate the splay
1047 tree ahead of time. We know a mapping doesn't already exist in the
1048 given context. Create new mappings to implement default semantics. */
1049
1050 static tree
1051 omp_copy_decl (tree var, copy_body_data *cb)
1052 {
1053 omp_context *ctx = (omp_context *) cb;
1054 tree new_var;
1055
1056 if (TREE_CODE (var) == LABEL_DECL)
1057 {
1058 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1059 DECL_CONTEXT (new_var) = current_function_decl;
1060 insert_decl_map (&ctx->cb, var, new_var);
1061 return new_var;
1062 }
1063
1064 while (!is_taskreg_ctx (ctx))
1065 {
1066 ctx = ctx->outer;
1067 if (ctx == NULL)
1068 return var;
1069 new_var = maybe_lookup_decl (var, ctx);
1070 if (new_var)
1071 return new_var;
1072 }
1073
1074 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1075 return var;
1076
1077 return error_mark_node;
1078 }
1079
1080
1081 /* Return the parallel region associated with STMT. */
1082
1083 /* Debugging dumps for parallel regions. */
1084 void dump_omp_region (FILE *, struct omp_region *, int);
1085 void debug_omp_region (struct omp_region *);
1086 void debug_all_omp_regions (void);
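/* The debug_* variants above are primarily meant to be called by hand
   from the debugger.  */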
1087
1088 /* Dump the parallel region tree rooted at REGION. */
1089
1090 void
1091 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1092 {
1093 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1094 gimple_code_name[region->type]);
1095
1096 if (region->inner)
1097 dump_omp_region (file, region->inner, indent + 4);
1098
1099 if (region->cont)
1100 {
1101 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1102 region->cont->index);
1103 }
1104
1105 if (region->exit)
1106 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1107 region->exit->index);
1108 else
1109 fprintf (file, "%*s[no exit marker]\n", indent, "");
1110
1111 if (region->next)
1112 dump_omp_region (file, region->next, indent);
1113 }
1114
1115 DEBUG_FUNCTION void
1116 debug_omp_region (struct omp_region *region)
1117 {
1118 dump_omp_region (stderr, region, 0);
1119 }
1120
1121 DEBUG_FUNCTION void
1122 debug_all_omp_regions (void)
1123 {
1124 dump_omp_region (stderr, root_omp_region, 0);
1125 }
1126
1127
1128 /* Create a new parallel region starting at STMT inside region PARENT. */
1129
1130 struct omp_region *
1131 new_omp_region (basic_block bb, enum gimple_code type,
1132 struct omp_region *parent)
1133 {
1134 struct omp_region *region = XCNEW (struct omp_region);
1135
1136 region->outer = parent;
1137 region->entry = bb;
1138 region->type = type;
1139
1140 if (parent)
1141 {
1142 /* This is a nested region. Add it to the list of inner
1143 regions in PARENT. */
1144 region->next = parent->inner;
1145 parent->inner = region;
1146 }
1147 else
1148 {
1149 /* This is a toplevel region. Add it to the list of toplevel
1150 regions in ROOT_OMP_REGION. */
1151 region->next = root_omp_region;
1152 root_omp_region = region;
1153 }
1154
1155 return region;
1156 }
1157
1158 /* Release the memory associated with the region tree rooted at REGION. */
1159
1160 static void
1161 free_omp_region_1 (struct omp_region *region)
1162 {
1163 struct omp_region *i, *n;
1164
1165 for (i = region->inner; i ; i = n)
1166 {
1167 n = i->next;
1168 free_omp_region_1 (i);
1169 }
1170
1171 free (region);
1172 }
1173
1174 /* Release the memory for the entire omp region tree. */
1175
1176 void
1177 free_omp_regions (void)
1178 {
1179 struct omp_region *r, *n;
1180 for (r = root_omp_region; r ; r = n)
1181 {
1182 n = r->next;
1183 free_omp_region_1 (r);
1184 }
1185 root_omp_region = NULL;
1186 }
1187
1188
1189 /* Create a new context, with OUTER_CTX being the surrounding context. */
1190
1191 static omp_context *
1192 new_omp_context (gimple stmt, omp_context *outer_ctx)
1193 {
1194 omp_context *ctx = XCNEW (omp_context);
1195
1196 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1197 (splay_tree_value) ctx);
1198 ctx->stmt = stmt;
1199
1200 if (outer_ctx)
1201 {
1202 ctx->outer = outer_ctx;
1203 ctx->cb = outer_ctx->cb;
1204 ctx->cb.block = NULL;
1205 ctx->depth = outer_ctx->depth + 1;
1206 }
1207 else
1208 {
1209 ctx->cb.src_fn = current_function_decl;
1210 ctx->cb.dst_fn = current_function_decl;
1211 ctx->cb.src_node = cgraph_node (current_function_decl);
1212 ctx->cb.dst_node = ctx->cb.src_node;
1213 ctx->cb.src_cfun = cfun;
1214 ctx->cb.copy_decl = omp_copy_decl;
1215 ctx->cb.eh_lp_nr = 0;
1216 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1217 ctx->depth = 1;
1218 }
1219
1220 ctx->cb.decl_map = pointer_map_create ();
1221
1222 return ctx;
1223 }
1224
1225 static gimple_seq maybe_catch_exception (gimple_seq);
1226
1227 /* Finalize task copyfn. */
1228
1229 static void
1230 finalize_task_copyfn (gimple task_stmt)
1231 {
1232 struct function *child_cfun;
1233 tree child_fn, old_fn;
1234 gimple_seq seq, new_seq;
1235 gimple bind;
1236
1237 child_fn = gimple_omp_task_copy_fn (task_stmt);
1238 if (child_fn == NULL_TREE)
1239 return;
1240
1241 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1242
1243 /* Inform the callgraph about the new function. */
1244 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1245 = cfun->curr_properties;
1246
1247 old_fn = current_function_decl;
1248 push_cfun (child_cfun);
1249 current_function_decl = child_fn;
1250 bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1251 seq = gimple_seq_alloc ();
1252 gimple_seq_add_stmt (&seq, bind);
1253 new_seq = maybe_catch_exception (seq);
1254 if (new_seq != seq)
1255 {
1256 bind = gimple_build_bind (NULL, new_seq, NULL);
1257 seq = gimple_seq_alloc ();
1258 gimple_seq_add_stmt (&seq, bind);
1259 }
1260 gimple_set_body (child_fn, seq);
1261 pop_cfun ();
1262 current_function_decl = old_fn;
1263
1264 cgraph_add_new_function (child_fn, false);
1265 }
1266
1267 /* Destroy an omp_context data structure. Called through the splay tree
1268 value delete callback. */
1269
1270 static void
1271 delete_omp_context (splay_tree_value value)
1272 {
1273 omp_context *ctx = (omp_context *) value;
1274
1275 pointer_map_destroy (ctx->cb.decl_map);
1276
1277 if (ctx->field_map)
1278 splay_tree_delete (ctx->field_map);
1279 if (ctx->sfield_map)
1280 splay_tree_delete (ctx->sfield_map);
1281
1282 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1283 it produces corrupt debug information. */
1284 if (ctx->record_type)
1285 {
1286 tree t;
1287 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1288 DECL_ABSTRACT_ORIGIN (t) = NULL;
1289 }
1290 if (ctx->srecord_type)
1291 {
1292 tree t;
1293 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
1294 DECL_ABSTRACT_ORIGIN (t) = NULL;
1295 }
1296
1297 if (is_task_ctx (ctx))
1298 finalize_task_copyfn (ctx->stmt);
1299
1300 XDELETE (ctx);
1301 }
1302
1303 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1304 context. */
1305
1306 static void
1307 fixup_child_record_type (omp_context *ctx)
1308 {
1309 tree f, type = ctx->record_type;
1310
1311 /* ??? It isn't sufficient to just call remap_type here, because
1312 variably_modified_type_p doesn't work the way we expect for
1313 record types. Testing each field for whether it needs remapping
1314 and creating a new record by hand works, however. */
1315 for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
1316 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1317 break;
1318 if (f)
1319 {
1320 tree name, new_fields = NULL;
1321
1322 type = lang_hooks.types.make_type (RECORD_TYPE);
1323 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1324 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1325 TYPE_DECL, name, type);
1326 TYPE_NAME (type) = name;
1327
1328 for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
1329 {
1330 tree new_f = copy_node (f);
1331 DECL_CONTEXT (new_f) = type;
1332 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1333 TREE_CHAIN (new_f) = new_fields;
1334 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1335 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1336 &ctx->cb, NULL);
1337 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1338 &ctx->cb, NULL);
1339 new_fields = new_f;
1340
1341 /* Arrange to be able to look up the receiver field
1342 given the sender field. */
1343 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1344 (splay_tree_value) new_f);
1345 }
1346 TYPE_FIELDS (type) = nreverse (new_fields);
1347 layout_type (type);
1348 }
1349
1350 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1351 }
1352
1353 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1354 specified by CLAUSES. */
1355
1356 static void
1357 scan_sharing_clauses (tree clauses, omp_context *ctx)
1358 {
1359 tree c, decl;
1360 bool scan_array_reductions = false;
1361
1362 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1363 {
1364 bool by_ref;
1365
1366 switch (OMP_CLAUSE_CODE (c))
1367 {
1368 case OMP_CLAUSE_PRIVATE:
1369 decl = OMP_CLAUSE_DECL (c);
1370 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1371 goto do_private;
1372 else if (!is_variable_sized (decl))
1373 install_var_local (decl, ctx);
1374 break;
1375
1376 case OMP_CLAUSE_SHARED:
1377 gcc_assert (is_taskreg_ctx (ctx));
1378 decl = OMP_CLAUSE_DECL (c);
1379 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1380 || !is_variable_sized (decl));
1381 /* Global variables don't need to be copied,
1382 the receiver side will use them directly. */
1383 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1384 break;
1385 by_ref = use_pointer_for_field (decl, ctx);
1386 if (! TREE_READONLY (decl)
1387 || TREE_ADDRESSABLE (decl)
1388 || by_ref
1389 || is_reference (decl))
1390 {
1391 install_var_field (decl, by_ref, 3, ctx);
1392 install_var_local (decl, ctx);
1393 break;
1394 }
1395 /* We don't need to copy const scalar vars back. */
1396 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1397 goto do_private;
1398
1399 case OMP_CLAUSE_LASTPRIVATE:
1400 /* Let the corresponding firstprivate clause create
1401 the variable. */
1402 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1403 break;
1404 /* FALLTHRU */
1405
1406 case OMP_CLAUSE_FIRSTPRIVATE:
1407 case OMP_CLAUSE_REDUCTION:
1408 decl = OMP_CLAUSE_DECL (c);
1409 do_private:
1410 if (is_variable_sized (decl))
1411 {
1412 if (is_task_ctx (ctx))
1413 install_var_field (decl, false, 1, ctx);
1414 break;
1415 }
1416 else if (is_taskreg_ctx (ctx))
1417 {
1418 bool global
1419 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1420 by_ref = use_pointer_for_field (decl, NULL);
1421
1422 if (is_task_ctx (ctx)
1423 && (global || by_ref || is_reference (decl)))
1424 {
1425 install_var_field (decl, false, 1, ctx);
1426 if (!global)
1427 install_var_field (decl, by_ref, 2, ctx);
1428 }
1429 else if (!global)
1430 install_var_field (decl, by_ref, 3, ctx);
1431 }
1432 install_var_local (decl, ctx);
1433 break;
1434
1435 case OMP_CLAUSE_COPYPRIVATE:
1436 case OMP_CLAUSE_COPYIN:
1437 decl = OMP_CLAUSE_DECL (c);
1438 by_ref = use_pointer_for_field (decl, NULL);
1439 install_var_field (decl, by_ref, 3, ctx);
1440 break;
1441
1442 case OMP_CLAUSE_DEFAULT:
1443 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1444 break;
1445
1446 case OMP_CLAUSE_IF:
1447 case OMP_CLAUSE_NUM_THREADS:
1448 case OMP_CLAUSE_SCHEDULE:
1449 if (ctx->outer)
1450 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1451 break;
1452
1453 case OMP_CLAUSE_NOWAIT:
1454 case OMP_CLAUSE_ORDERED:
1455 case OMP_CLAUSE_COLLAPSE:
1456 case OMP_CLAUSE_UNTIED:
1457 break;
1458
1459 default:
1460 gcc_unreachable ();
1461 }
1462 }
1463
1464 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1465 {
1466 switch (OMP_CLAUSE_CODE (c))
1467 {
1468 case OMP_CLAUSE_LASTPRIVATE:
1469 /* Let the corresponding firstprivate clause create
1470 the variable. */
1471 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1472 scan_array_reductions = true;
1473 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1474 break;
1475 /* FALLTHRU */
1476
1477 case OMP_CLAUSE_PRIVATE:
1478 case OMP_CLAUSE_FIRSTPRIVATE:
1479 case OMP_CLAUSE_REDUCTION:
1480 decl = OMP_CLAUSE_DECL (c);
1481 if (is_variable_sized (decl))
1482 install_var_local (decl, ctx);
1483 fixup_remapped_decl (decl, ctx,
1484 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1485 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1486 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1487 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1488 scan_array_reductions = true;
1489 break;
1490
1491 case OMP_CLAUSE_SHARED:
1492 decl = OMP_CLAUSE_DECL (c);
1493 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1494 fixup_remapped_decl (decl, ctx, false);
1495 break;
1496
1497 case OMP_CLAUSE_COPYPRIVATE:
1498 case OMP_CLAUSE_COPYIN:
1499 case OMP_CLAUSE_DEFAULT:
1500 case OMP_CLAUSE_IF:
1501 case OMP_CLAUSE_NUM_THREADS:
1502 case OMP_CLAUSE_SCHEDULE:
1503 case OMP_CLAUSE_NOWAIT:
1504 case OMP_CLAUSE_ORDERED:
1505 case OMP_CLAUSE_COLLAPSE:
1506 case OMP_CLAUSE_UNTIED:
1507 break;
1508
1509 default:
1510 gcc_unreachable ();
1511 }
1512 }
1513
1514 if (scan_array_reductions)
1515 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1516 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1517 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1518 {
1519 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1520 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1521 }
1522 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1523 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1524 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1525 }
1526
1527 /* Create a new name for omp child function. Returns an identifier. */
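/* The resulting assembler-visible names typically look like
   foo._omp_fn.N, or foo._omp_cpyfn.N for task copy functions; the exact
   mangling is up to clone_function_name.  */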
1528
1529 static GTY(()) unsigned int tmp_ompfn_id_num;
1530
1531 static tree
1532 create_omp_child_function_name (bool task_copy)
1533 {
1534 return (clone_function_name (current_function_decl,
1535 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1536 }
1537
1538 /* Build a decl for the omp child function. It won't contain a body
1539 yet, just the bare decl. */
1540
1541 static void
1542 create_omp_child_function (omp_context *ctx, bool task_copy)
1543 {
1544 tree decl, type, name, t;
1545
1546 name = create_omp_child_function_name (task_copy);
1547 if (task_copy)
1548 type = build_function_type_list (void_type_node, ptr_type_node,
1549 ptr_type_node, NULL_TREE);
1550 else
1551 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1552
1553 decl = build_decl (gimple_location (ctx->stmt),
1554 FUNCTION_DECL, name, type);
1555
1556 if (!task_copy)
1557 ctx->cb.dst_fn = decl;
1558 else
1559 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1560
1561 TREE_STATIC (decl) = 1;
1562 TREE_USED (decl) = 1;
1563 DECL_ARTIFICIAL (decl) = 1;
1564 DECL_IGNORED_P (decl) = 0;
1565 TREE_PUBLIC (decl) = 0;
1566 DECL_UNINLINABLE (decl) = 1;
1567 DECL_EXTERNAL (decl) = 0;
1568 DECL_CONTEXT (decl) = NULL_TREE;
1569 DECL_INITIAL (decl) = make_node (BLOCK);
1570
1571 t = build_decl (DECL_SOURCE_LOCATION (decl),
1572 RESULT_DECL, NULL_TREE, void_type_node);
1573 DECL_ARTIFICIAL (t) = 1;
1574 DECL_IGNORED_P (t) = 1;
1575 DECL_CONTEXT (t) = decl;
1576 DECL_RESULT (decl) = t;
1577
1578 t = build_decl (DECL_SOURCE_LOCATION (decl),
1579 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1580 DECL_ARTIFICIAL (t) = 1;
1581 DECL_ARG_TYPE (t) = ptr_type_node;
1582 DECL_CONTEXT (t) = current_function_decl;
1583 TREE_USED (t) = 1;
1584 DECL_ARGUMENTS (decl) = t;
1585 if (!task_copy)
1586 ctx->receiver_decl = t;
1587 else
1588 {
1589 t = build_decl (DECL_SOURCE_LOCATION (decl),
1590 PARM_DECL, get_identifier (".omp_data_o"),
1591 ptr_type_node);
1592 DECL_ARTIFICIAL (t) = 1;
1593 DECL_ARG_TYPE (t) = ptr_type_node;
1594 DECL_CONTEXT (t) = current_function_decl;
1595 TREE_USED (t) = 1;
1596 TREE_ADDRESSABLE (t) = 1;
1597 TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
1598 DECL_ARGUMENTS (decl) = t;
1599 }
1600
1601 /* Allocate memory for the function structure. The call to
1602 allocate_struct_function clobbers CFUN, so we need to restore
1603 it afterward. */
1604 push_struct_function (decl);
1605 cfun->function_end_locus = gimple_location (ctx->stmt);
1606 pop_cfun ();
1607 }
1608
1609
1610 /* Scan an OpenMP parallel directive. */
1611
1612 static void
1613 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1614 {
1615 omp_context *ctx;
1616 tree name;
1617 gimple stmt = gsi_stmt (*gsi);
1618
1619 /* Ignore parallel directives with empty bodies, unless there
1620 are copyin clauses. */
1621 if (optimize > 0
1622 && empty_body_p (gimple_omp_body (stmt))
1623 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1624 OMP_CLAUSE_COPYIN) == NULL)
1625 {
1626 gsi_replace (gsi, gimple_build_nop (), false);
1627 return;
1628 }
1629
1630 ctx = new_omp_context (stmt, outer_ctx);
1631 if (taskreg_nesting_level > 1)
1632 ctx->is_nested = true;
1633 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1634 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1635 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1636 name = create_tmp_var_name (".omp_data_s");
1637 name = build_decl (gimple_location (stmt),
1638 TYPE_DECL, name, ctx->record_type);
1639 TYPE_NAME (ctx->record_type) = name;
1640 create_omp_child_function (ctx, false);
1641 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1642
1643 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1644 scan_omp (gimple_omp_body (stmt), ctx);
1645
1646 if (TYPE_FIELDS (ctx->record_type) == NULL)
1647 ctx->record_type = ctx->receiver_decl = NULL;
1648 else
1649 {
1650 layout_type (ctx->record_type);
1651 fixup_child_record_type (ctx);
1652 }
1653 }
1654
1655 /* Scan an OpenMP task directive. */
1656
1657 static void
1658 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1659 {
1660 omp_context *ctx;
1661 tree name, t;
1662 gimple stmt = gsi_stmt (*gsi);
1663 location_t loc = gimple_location (stmt);
1664
1665 /* Ignore task directives with empty bodies. */
1666 if (optimize > 0
1667 && empty_body_p (gimple_omp_body (stmt)))
1668 {
1669 gsi_replace (gsi, gimple_build_nop (), false);
1670 return;
1671 }
1672
1673 ctx = new_omp_context (stmt, outer_ctx);
1674 if (taskreg_nesting_level > 1)
1675 ctx->is_nested = true;
1676 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1677 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1678 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1679 name = create_tmp_var_name (".omp_data_s");
1680 name = build_decl (gimple_location (stmt),
1681 TYPE_DECL, name, ctx->record_type);
1682 TYPE_NAME (ctx->record_type) = name;
1683 create_omp_child_function (ctx, false);
1684 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1685
1686 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1687
1688 if (ctx->srecord_type)
1689 {
1690 name = create_tmp_var_name (".omp_data_a");
1691 name = build_decl (gimple_location (stmt),
1692 TYPE_DECL, name, ctx->srecord_type);
1693 TYPE_NAME (ctx->srecord_type) = name;
1694 create_omp_child_function (ctx, true);
1695 }
1696
1697 scan_omp (gimple_omp_body (stmt), ctx);
1698
1699 if (TYPE_FIELDS (ctx->record_type) == NULL)
1700 {
1701 ctx->record_type = ctx->receiver_decl = NULL;
1702 t = build_int_cst (long_integer_type_node, 0);
1703 gimple_omp_task_set_arg_size (stmt, t);
1704 t = build_int_cst (long_integer_type_node, 1);
1705 gimple_omp_task_set_arg_align (stmt, t);
1706 }
1707 else
1708 {
1709 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1710 /* Move VLA fields to the end. */
1711 p = &TYPE_FIELDS (ctx->record_type);
1712 while (*p)
1713 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1714 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1715 {
1716 *q = *p;
1717 *p = TREE_CHAIN (*p);
1718 TREE_CHAIN (*q) = NULL_TREE;
1719 q = &TREE_CHAIN (*q);
1720 }
1721 else
1722 p = &TREE_CHAIN (*p);
1723 *p = vla_fields;
1724 layout_type (ctx->record_type);
1725 fixup_child_record_type (ctx);
1726 if (ctx->srecord_type)
1727 layout_type (ctx->srecord_type);
1728 t = fold_convert_loc (loc, long_integer_type_node,
1729 TYPE_SIZE_UNIT (ctx->record_type));
1730 gimple_omp_task_set_arg_size (stmt, t);
1731 t = build_int_cst (long_integer_type_node,
1732 TYPE_ALIGN_UNIT (ctx->record_type));
1733 gimple_omp_task_set_arg_align (stmt, t);
1734 }
1735 }
1736
1737
1738 /* Scan an OpenMP loop directive. */
1739
1740 static void
1741 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1742 {
1743 omp_context *ctx;
1744 size_t i;
1745
1746 ctx = new_omp_context (stmt, outer_ctx);
1747
1748 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1749
1750 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1751 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1752 {
1753 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1754 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1755 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1756 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1757 }
1758 scan_omp (gimple_omp_body (stmt), ctx);
1759 }
1760
1761 /* Scan an OpenMP sections directive. */
1762
1763 static void
1764 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1765 {
1766 omp_context *ctx;
1767
1768 ctx = new_omp_context (stmt, outer_ctx);
1769 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1770 scan_omp (gimple_omp_body (stmt), ctx);
1771 }
1772
1773 /* Scan an OpenMP single directive. */
1774
1775 static void
1776 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1777 {
1778 omp_context *ctx;
1779 tree name;
1780
1781 ctx = new_omp_context (stmt, outer_ctx);
1782 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1783 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1784 name = create_tmp_var_name (".omp_copy_s");
1785 name = build_decl (gimple_location (stmt),
1786 TYPE_DECL, name, ctx->record_type);
1787 TYPE_NAME (ctx->record_type) = name;
1788
1789 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1790 scan_omp (gimple_omp_body (stmt), ctx);
1791
1792 if (TYPE_FIELDS (ctx->record_type) == NULL)
1793 ctx->record_type = NULL;
1794 else
1795 layout_type (ctx->record_type);
1796 }
1797
1798
1799 /* Check OpenMP nesting restrictions. */
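/* As a hypothetical example of what gets diagnosed below: a work-sharing
   loop closely nested inside another work-sharing region, rather than
   inside a parallel region, draws the work-sharing warning:

       #pragma omp for
       for (i = 0; i < n; i++)
         {
           #pragma omp for       <-- warned about
           for (j = 0; j < n; j++)
             body (i, j);
         }
 */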
1800 static void
1801 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1802 {
1803 switch (gimple_code (stmt))
1804 {
1805 case GIMPLE_OMP_FOR:
1806 case GIMPLE_OMP_SECTIONS:
1807 case GIMPLE_OMP_SINGLE:
1808 case GIMPLE_CALL:
1809 for (; ctx != NULL; ctx = ctx->outer)
1810 switch (gimple_code (ctx->stmt))
1811 {
1812 case GIMPLE_OMP_FOR:
1813 case GIMPLE_OMP_SECTIONS:
1814 case GIMPLE_OMP_SINGLE:
1815 case GIMPLE_OMP_ORDERED:
1816 case GIMPLE_OMP_MASTER:
1817 case GIMPLE_OMP_TASK:
1818 if (is_gimple_call (stmt))
1819 {
1820 warning (0, "barrier region may not be closely nested inside "
1821 "of work-sharing, critical, ordered, master or "
1822 "explicit task region");
1823 return;
1824 }
1825 warning (0, "work-sharing region may not be closely nested inside "
1826 "of work-sharing, critical, ordered, master or explicit "
1827 "task region");
1828 return;
1829 case GIMPLE_OMP_PARALLEL:
1830 return;
1831 default:
1832 break;
1833 }
1834 break;
1835 case GIMPLE_OMP_MASTER:
1836 for (; ctx != NULL; ctx = ctx->outer)
1837 switch (gimple_code (ctx->stmt))
1838 {
1839 case GIMPLE_OMP_FOR:
1840 case GIMPLE_OMP_SECTIONS:
1841 case GIMPLE_OMP_SINGLE:
1842 case GIMPLE_OMP_TASK:
1843 warning (0, "master region may not be closely nested inside "
1844 "of work-sharing or explicit task region");
1845 return;
1846 case GIMPLE_OMP_PARALLEL:
1847 return;
1848 default:
1849 break;
1850 }
1851 break;
1852 case GIMPLE_OMP_ORDERED:
1853 for (; ctx != NULL; ctx = ctx->outer)
1854 switch (gimple_code (ctx->stmt))
1855 {
1856 case GIMPLE_OMP_CRITICAL:
1857 case GIMPLE_OMP_TASK:
1858 warning (0, "ordered region may not be closely nested inside "
1859 "of critical or explicit task region");
1860 return;
1861 case GIMPLE_OMP_FOR:
1862 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1863 OMP_CLAUSE_ORDERED) == NULL)
1864 warning (0, "ordered region must be closely nested inside "
1865 "a loop region with an ordered clause");
1866 return;
1867 case GIMPLE_OMP_PARALLEL:
1868 return;
1869 default:
1870 break;
1871 }
1872 break;
1873 case GIMPLE_OMP_CRITICAL:
1874 for (; ctx != NULL; ctx = ctx->outer)
1875 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1876 && (gimple_omp_critical_name (stmt)
1877 == gimple_omp_critical_name (ctx->stmt)))
1878 {
1879 warning (0, "critical region may not be nested inside a critical "
1880 "region with the same name");
1881 return;
1882 }
1883 break;
1884 default:
1885 break;
1886 }
1887 }
1888
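/* As a rough illustration of the restrictions enforced above (not the
   exact diagnostic wording), a sketch such as

	#pragma omp parallel
	#pragma omp for
	for (i = 0; i < n; i++)
	  {
	  #pragma omp barrier
	  }

   is diagnosed, because the barrier is closely nested inside a
   work-sharing region, whereas a work-sharing region nested directly
   inside a parallel region is accepted.  */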
1889
1890 /* Helper function for scan_omp.
1891
1892 Callback for walk_tree or operators in walk_gimple_stmt used to
1893 scan for OpenMP directives in TP. */
1894
1895 static tree
1896 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1897 {
1898 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1899 omp_context *ctx = (omp_context *) wi->info;
1900 tree t = *tp;
1901
1902 switch (TREE_CODE (t))
1903 {
1904 case VAR_DECL:
1905 case PARM_DECL:
1906 case LABEL_DECL:
1907 case RESULT_DECL:
1908 if (ctx)
1909 *tp = remap_decl (t, &ctx->cb);
1910 break;
1911
1912 default:
1913 if (ctx && TYPE_P (t))
1914 *tp = remap_type (t, &ctx->cb);
1915 else if (!DECL_P (t))
1916 {
1917 *walk_subtrees = 1;
1918 if (ctx)
1919 {
1920 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1921 if (tem != TREE_TYPE (t))
1922 {
1923 if (TREE_CODE (t) == INTEGER_CST)
1924 *tp = build_int_cst_wide (tem,
1925 TREE_INT_CST_LOW (t),
1926 TREE_INT_CST_HIGH (t));
1927 else
1928 TREE_TYPE (t) = tem;
1929 }
1930 }
1931 }
1932 break;
1933 }
1934
1935 return NULL_TREE;
1936 }
1937
1938
1939 /* Helper function for scan_omp.
1940
1941 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1942 the current statement in GSI. */
1943
1944 static tree
1945 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1946 struct walk_stmt_info *wi)
1947 {
1948 gimple stmt = gsi_stmt (*gsi);
1949 omp_context *ctx = (omp_context *) wi->info;
1950
1951 if (gimple_has_location (stmt))
1952 input_location = gimple_location (stmt);
1953
1954 /* Check the OpenMP nesting restrictions. */
1955 if (ctx != NULL)
1956 {
1957 if (is_gimple_omp (stmt))
1958 check_omp_nesting_restrictions (stmt, ctx);
1959 else if (is_gimple_call (stmt))
1960 {
1961 tree fndecl = gimple_call_fndecl (stmt);
1962 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1963 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1964 check_omp_nesting_restrictions (stmt, ctx);
1965 }
1966 }
1967
1968 *handled_ops_p = true;
1969
1970 switch (gimple_code (stmt))
1971 {
1972 case GIMPLE_OMP_PARALLEL:
1973 taskreg_nesting_level++;
1974 scan_omp_parallel (gsi, ctx);
1975 taskreg_nesting_level--;
1976 break;
1977
1978 case GIMPLE_OMP_TASK:
1979 taskreg_nesting_level++;
1980 scan_omp_task (gsi, ctx);
1981 taskreg_nesting_level--;
1982 break;
1983
1984 case GIMPLE_OMP_FOR:
1985 scan_omp_for (stmt, ctx);
1986 break;
1987
1988 case GIMPLE_OMP_SECTIONS:
1989 scan_omp_sections (stmt, ctx);
1990 break;
1991
1992 case GIMPLE_OMP_SINGLE:
1993 scan_omp_single (stmt, ctx);
1994 break;
1995
1996 case GIMPLE_OMP_SECTION:
1997 case GIMPLE_OMP_MASTER:
1998 case GIMPLE_OMP_ORDERED:
1999 case GIMPLE_OMP_CRITICAL:
2000 ctx = new_omp_context (stmt, ctx);
2001 scan_omp (gimple_omp_body (stmt), ctx);
2002 break;
2003
2004 case GIMPLE_BIND:
2005 {
2006 tree var;
2007
2008 *handled_ops_p = false;
2009 if (ctx)
2010 for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
2011 insert_decl_map (&ctx->cb, var, var);
2012 }
2013 break;
2014 default:
2015 *handled_ops_p = false;
2016 break;
2017 }
2018
2019 return NULL_TREE;
2020 }
2021
2022
2023 /* Scan all the statements starting at the current statement. CTX
2024 contains context information about the OpenMP directives and
2025 clauses found during the scan. */
2026
2027 static void
2028 scan_omp (gimple_seq body, omp_context *ctx)
2029 {
2030 location_t saved_location;
2031 struct walk_stmt_info wi;
2032
2033 memset (&wi, 0, sizeof (wi));
2034 wi.info = ctx;
2035 wi.want_locations = true;
2036
2037 saved_location = input_location;
2038 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2039 input_location = saved_location;
2040 }
2041 \f
2042 /* Re-gimplification and code generation routines. */
2043
2044 /* Build a call to GOMP_barrier. */
2045
2046 static tree
2047 build_omp_barrier (void)
2048 {
2049 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2050 }
2051
2052 /* If a context was created for STMT when it was scanned, return it. */
2053
2054 static omp_context *
2055 maybe_lookup_ctx (gimple stmt)
2056 {
2057 splay_tree_node n;
2058 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2059 return n ? (omp_context *) n->value : NULL;
2060 }
2061
2062
2063 /* Find the mapping for DECL in CTX or the immediately enclosing
2064 context that has a mapping for DECL.
2065
2066 If CTX is a nested parallel directive, we may have to use the decl
2067 mappings created in CTX's parent context. Suppose that we have the
2068    following parallel nesting (variable UIDs shown for clarity):
2069
2070 iD.1562 = 0;
2071 #omp parallel shared(iD.1562) -> outer parallel
2072 iD.1562 = iD.1562 + 1;
2073
2074 #omp parallel shared (iD.1562) -> inner parallel
2075 iD.1562 = iD.1562 - 1;
2076
2077 Each parallel structure will create a distinct .omp_data_s structure
2078 for copying iD.1562 in/out of the directive:
2079
2080 outer parallel .omp_data_s.1.i -> iD.1562
2081 inner parallel .omp_data_s.2.i -> iD.1562
2082
2083 A shared variable mapping will produce a copy-out operation before
2084 the parallel directive and a copy-in operation after it. So, in
2085 this case we would have:
2086
2087 iD.1562 = 0;
2088 .omp_data_o.1.i = iD.1562;
2089 #omp parallel shared(iD.1562) -> outer parallel
2090 .omp_data_i.1 = &.omp_data_o.1
2091 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2092
2093 .omp_data_o.2.i = iD.1562; -> **
2094 #omp parallel shared(iD.1562) -> inner parallel
2095 .omp_data_i.2 = &.omp_data_o.2
2096 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2097
2098
2099 ** This is a problem. The symbol iD.1562 cannot be referenced
2100 inside the body of the outer parallel region. But since we are
2101 emitting this copy operation while expanding the inner parallel
2102 directive, we need to access the CTX structure of the outer
2103 parallel directive to get the correct mapping:
2104
2105 .omp_data_o.2.i = .omp_data_i.1->i
2106
2107 Since there may be other workshare or parallel directives enclosing
2108 the parallel directive, it may be necessary to walk up the context
2109 parent chain. This is not a problem in general because nested
2110 parallelism happens only rarely. */
2111
2112 static tree
2113 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2114 {
2115 tree t;
2116 omp_context *up;
2117
2118 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2119 t = maybe_lookup_decl (decl, up);
2120
2121 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2122
2123 return t ? t : decl;
2124 }
2125
2126
2127 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2128 in outer contexts. */
2129
2130 static tree
2131 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2132 {
2133 tree t = NULL;
2134 omp_context *up;
2135
2136 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2137 t = maybe_lookup_decl (decl, up);
2138
2139 return t ? t : decl;
2140 }
2141
2142
2143 /* Construct the initialization value for reduction CLAUSE. */
2144
2145 tree
2146 omp_reduction_init (tree clause, tree type)
2147 {
2148 location_t loc = OMP_CLAUSE_LOCATION (clause);
2149 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2150 {
2151 case PLUS_EXPR:
2152 case MINUS_EXPR:
2153 case BIT_IOR_EXPR:
2154 case BIT_XOR_EXPR:
2155 case TRUTH_OR_EXPR:
2156 case TRUTH_ORIF_EXPR:
2157 case TRUTH_XOR_EXPR:
2158 case NE_EXPR:
2159 return fold_convert_loc (loc, type, integer_zero_node);
2160
2161 case MULT_EXPR:
2162 case TRUTH_AND_EXPR:
2163 case TRUTH_ANDIF_EXPR:
2164 case EQ_EXPR:
2165 return fold_convert_loc (loc, type, integer_one_node);
2166
2167 case BIT_AND_EXPR:
2168 return fold_convert_loc (loc, type, integer_minus_one_node);
2169
2170 case MAX_EXPR:
2171 if (SCALAR_FLOAT_TYPE_P (type))
2172 {
2173 REAL_VALUE_TYPE max, min;
2174 if (HONOR_INFINITIES (TYPE_MODE (type)))
2175 {
2176 real_inf (&max);
2177 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2178 }
2179 else
2180 real_maxval (&min, 1, TYPE_MODE (type));
2181 return build_real (type, min);
2182 }
2183 else
2184 {
2185 gcc_assert (INTEGRAL_TYPE_P (type));
2186 return TYPE_MIN_VALUE (type);
2187 }
2188
2189 case MIN_EXPR:
2190 if (SCALAR_FLOAT_TYPE_P (type))
2191 {
2192 REAL_VALUE_TYPE max;
2193 if (HONOR_INFINITIES (TYPE_MODE (type)))
2194 real_inf (&max);
2195 else
2196 real_maxval (&max, 0, TYPE_MODE (type));
2197 return build_real (type, max);
2198 }
2199 else
2200 {
2201 gcc_assert (INTEGRAL_TYPE_P (type));
2202 return TYPE_MAX_VALUE (type);
2203 }
2204
2205 default:
2206 gcc_unreachable ();
2207 }
2208 }
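
/* For instance, under the mapping above the private copy for
   reduction(+:x) or reduction(|:x) starts at 0, reduction(*:x) and
   reduction(&&:x) start at 1, reduction(&:x) starts at ~0, and
   reduction(max:x) / reduction(min:x) start at the minimum / maximum
   representable value of the type (or -inf / +inf for floating point
   types honoring infinities).  */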
2209
2210 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2211 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2212 private variables. Initialization statements go in ILIST, while calls
2213 to destructors go in DLIST. */
2214
2215 static void
2216 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2217 omp_context *ctx)
2218 {
2219 gimple_stmt_iterator diter;
2220 tree c, dtor, copyin_seq, x, ptr;
2221 bool copyin_by_ref = false;
2222 bool lastprivate_firstprivate = false;
2223 int pass;
2224
2225 *dlist = gimple_seq_alloc ();
2226 diter = gsi_start (*dlist);
2227 copyin_seq = NULL;
2228
2229 /* Do all the fixed sized types in the first pass, and the variable sized
2230 types in the second pass. This makes sure that the scalar arguments to
2231 the variable sized types are processed before we use them in the
2232 variable sized operations. */
2233 for (pass = 0; pass < 2; ++pass)
2234 {
2235 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2236 {
2237 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2238 tree var, new_var;
2239 bool by_ref;
2240 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2241
2242 switch (c_kind)
2243 {
2244 case OMP_CLAUSE_PRIVATE:
2245 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2246 continue;
2247 break;
2248 case OMP_CLAUSE_SHARED:
2249 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2250 {
2251 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2252 continue;
2253 }
2254 case OMP_CLAUSE_FIRSTPRIVATE:
2255 case OMP_CLAUSE_COPYIN:
2256 case OMP_CLAUSE_REDUCTION:
2257 break;
2258 case OMP_CLAUSE_LASTPRIVATE:
2259 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2260 {
2261 lastprivate_firstprivate = true;
2262 if (pass != 0)
2263 continue;
2264 }
2265 break;
2266 default:
2267 continue;
2268 }
2269
2270 new_var = var = OMP_CLAUSE_DECL (c);
2271 if (c_kind != OMP_CLAUSE_COPYIN)
2272 new_var = lookup_decl (var, ctx);
2273
2274 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2275 {
2276 if (pass != 0)
2277 continue;
2278 }
2279 else if (is_variable_sized (var))
2280 {
2281 /* For variable sized types, we need to allocate the
2282 actual storage here. Call alloca and store the
2283 result in the pointer decl that we created elsewhere. */
2284 if (pass == 0)
2285 continue;
2286
2287 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2288 {
2289 gimple stmt;
2290 tree tmp;
2291
2292 ptr = DECL_VALUE_EXPR (new_var);
2293 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2294 ptr = TREE_OPERAND (ptr, 0);
2295 gcc_assert (DECL_P (ptr));
2296 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2297
2298 		  /* void *tmp = __builtin_alloca (x);  */
2299 stmt
2300 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2301 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2302 gimple_add_tmp_var (tmp);
2303 gimple_call_set_lhs (stmt, tmp);
2304
2305 gimple_seq_add_stmt (ilist, stmt);
2306
2307 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2308 gimplify_assign (ptr, x, ilist);
2309 }
2310 }
2311 else if (is_reference (var))
2312 {
2313 /* For references that are being privatized for Fortran,
2314 allocate new backing storage for the new pointer
2315 variable. This allows us to avoid changing all the
2316 code that expects a pointer to something that expects
2317 a direct variable. Note that this doesn't apply to
2318 C++, since reference types are disallowed in data
2319 sharing clauses there, except for NRV optimized
2320 return values. */
2321 if (pass == 0)
2322 continue;
2323
2324 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2325 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2326 {
2327 x = build_receiver_ref (var, false, ctx);
2328 x = build_fold_addr_expr_loc (clause_loc, x);
2329 }
2330 else if (TREE_CONSTANT (x))
2331 {
2332 const char *name = NULL;
2333 if (DECL_NAME (var))
2334 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2335
2336 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2337 name);
2338 gimple_add_tmp_var (x);
2339 TREE_ADDRESSABLE (x) = 1;
2340 x = build_fold_addr_expr_loc (clause_loc, x);
2341 }
2342 else
2343 {
2344 x = build_call_expr_loc (clause_loc,
2345 built_in_decls[BUILT_IN_ALLOCA], 1, x);
2346 }
2347
2348 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2349 gimplify_assign (new_var, x, ilist);
2350
2351 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2352 }
2353 else if (c_kind == OMP_CLAUSE_REDUCTION
2354 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2355 {
2356 if (pass == 0)
2357 continue;
2358 }
2359 else if (pass != 0)
2360 continue;
2361
2362 switch (OMP_CLAUSE_CODE (c))
2363 {
2364 case OMP_CLAUSE_SHARED:
2365 /* Shared global vars are just accessed directly. */
2366 if (is_global_var (new_var))
2367 break;
2368 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2369 needs to be delayed until after fixup_child_record_type so
2370 that we get the correct type during the dereference. */
2371 by_ref = use_pointer_for_field (var, ctx);
2372 x = build_receiver_ref (var, by_ref, ctx);
2373 SET_DECL_VALUE_EXPR (new_var, x);
2374 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2375
2376 /* ??? If VAR is not passed by reference, and the variable
2377 hasn't been initialized yet, then we'll get a warning for
2378 the store into the omp_data_s structure. Ideally, we'd be
2379 able to notice this and not store anything at all, but
2380 we're generating code too early. Suppress the warning. */
2381 if (!by_ref)
2382 TREE_NO_WARNING (var) = 1;
2383 break;
2384
2385 case OMP_CLAUSE_LASTPRIVATE:
2386 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2387 break;
2388 /* FALLTHRU */
2389
2390 case OMP_CLAUSE_PRIVATE:
2391 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2392 x = build_outer_var_ref (var, ctx);
2393 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2394 {
2395 if (is_task_ctx (ctx))
2396 x = build_receiver_ref (var, false, ctx);
2397 else
2398 x = build_outer_var_ref (var, ctx);
2399 }
2400 else
2401 x = NULL;
2402 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2403 if (x)
2404 gimplify_and_add (x, ilist);
2405 /* FALLTHRU */
2406
2407 do_dtor:
2408 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2409 if (x)
2410 {
2411 gimple_seq tseq = NULL;
2412
2413 dtor = x;
2414 gimplify_stmt (&dtor, &tseq);
2415 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2416 }
2417 break;
2418
2419 case OMP_CLAUSE_FIRSTPRIVATE:
2420 if (is_task_ctx (ctx))
2421 {
2422 if (is_reference (var) || is_variable_sized (var))
2423 goto do_dtor;
2424 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2425 ctx))
2426 || use_pointer_for_field (var, NULL))
2427 {
2428 x = build_receiver_ref (var, false, ctx);
2429 SET_DECL_VALUE_EXPR (new_var, x);
2430 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2431 goto do_dtor;
2432 }
2433 }
2434 x = build_outer_var_ref (var, ctx);
2435 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2436 gimplify_and_add (x, ilist);
2437 goto do_dtor;
2438 break;
2439
2440 case OMP_CLAUSE_COPYIN:
2441 by_ref = use_pointer_for_field (var, NULL);
2442 x = build_receiver_ref (var, by_ref, ctx);
2443 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2444 append_to_statement_list (x, &copyin_seq);
2445 copyin_by_ref |= by_ref;
2446 break;
2447
2448 case OMP_CLAUSE_REDUCTION:
2449 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2450 {
2451 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2452 x = build_outer_var_ref (var, ctx);
2453
2454 if (is_reference (var))
2455 x = build_fold_addr_expr_loc (clause_loc, x);
2456 SET_DECL_VALUE_EXPR (placeholder, x);
2457 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2458 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2459 gimple_seq_add_seq (ilist,
2460 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2461 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2462 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2463 }
2464 else
2465 {
2466 x = omp_reduction_init (c, TREE_TYPE (new_var));
2467 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2468 gimplify_assign (new_var, x, ilist);
2469 }
2470 break;
2471
2472 default:
2473 gcc_unreachable ();
2474 }
2475 }
2476 }
2477
2478 /* The copyin sequence is not to be executed by the main thread, since
2479       that would result in self-copies.  A self-copy may not be observable
2480       for scalars, but it certainly is for C++ operator=.  */
2481 if (copyin_seq)
2482 {
2483 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2484 x = build2 (NE_EXPR, boolean_type_node, x,
2485 build_int_cst (TREE_TYPE (x), 0));
2486 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2487 gimplify_and_add (x, ilist);
2488 }
2489
2490 /* If any copyin variable is passed by reference, we must ensure the
2491 master thread doesn't modify it before it is copied over in all
2492 threads. Similarly for variables in both firstprivate and
2493 lastprivate clauses we need to ensure the lastprivate copying
2494 happens after firstprivate copying in all threads. */
2495 if (copyin_by_ref || lastprivate_firstprivate)
2496 gimplify_and_add (build_omp_barrier (), ilist);
2497 }
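
/* As a rough sketch (variable and field names are illustrative only),
   for

	#pragma omp parallel firstprivate (x) copyin (tp)

   the receiver-side code accumulated in ILIST behaves approximately as

	x' = .omp_data_i->x;                    (private copy of x)
	if (__builtin_omp_get_thread_num () != 0)
	  tp = .omp_data_i->tp;                 (copyin; the master skips the self-copy)
	GOMP_barrier ();                        (emitted only when required, see above)

   while destructor calls for privatized C++ objects are queued in
   DLIST.  */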
2498
2499
2500 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2501 both parallel and workshare constructs. PREDICATE may be NULL if it's
2502 always true. */
2503
2504 static void
2505 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2506 omp_context *ctx)
2507 {
2508 tree x, c, label = NULL;
2509 bool par_clauses = false;
2510
2511 /* Early exit if there are no lastprivate clauses. */
2512 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2513 if (clauses == NULL)
2514 {
2515 /* If this was a workshare clause, see if it had been combined
2516 with its parallel. In that case, look for the clauses on the
2517 parallel statement itself. */
2518 if (is_parallel_ctx (ctx))
2519 return;
2520
2521 ctx = ctx->outer;
2522 if (ctx == NULL || !is_parallel_ctx (ctx))
2523 return;
2524
2525 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2526 OMP_CLAUSE_LASTPRIVATE);
2527 if (clauses == NULL)
2528 return;
2529 par_clauses = true;
2530 }
2531
2532 if (predicate)
2533 {
2534 gimple stmt;
2535 tree label_true, arm1, arm2;
2536
2537 label = create_artificial_label (UNKNOWN_LOCATION);
2538 label_true = create_artificial_label (UNKNOWN_LOCATION);
2539 arm1 = TREE_OPERAND (predicate, 0);
2540 arm2 = TREE_OPERAND (predicate, 1);
2541 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2542 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2543 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2544 label_true, label);
2545 gimple_seq_add_stmt (stmt_list, stmt);
2546 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2547 }
2548
2549 for (c = clauses; c ;)
2550 {
2551 tree var, new_var;
2552 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2553
2554 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2555 {
2556 var = OMP_CLAUSE_DECL (c);
2557 new_var = lookup_decl (var, ctx);
2558
2559 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2560 {
2561 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2562 gimple_seq_add_seq (stmt_list,
2563 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2564 }
2565 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2566
2567 x = build_outer_var_ref (var, ctx);
2568 if (is_reference (var))
2569 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2570 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2571 gimplify_and_add (x, stmt_list);
2572 }
2573 c = OMP_CLAUSE_CHAIN (c);
2574 if (c == NULL && !par_clauses)
2575 {
2576 /* If this was a workshare clause, see if it had been combined
2577 with its parallel. In that case, continue looking for the
2578 clauses also on the parallel statement itself. */
2579 if (is_parallel_ctx (ctx))
2580 break;
2581
2582 ctx = ctx->outer;
2583 if (ctx == NULL || !is_parallel_ctx (ctx))
2584 break;
2585
2586 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2587 OMP_CLAUSE_LASTPRIVATE);
2588 par_clauses = true;
2589 }
2590 }
2591
2592 if (label)
2593 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2594 }
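
/* In rough terms, the sequence appended to STMT_LIST above has the shape

	if (<this thread executed the sequentially last iteration>)
	  {
	    <any deferred lastprivate statements>
	    original_var = private_var;
	  }

   where the guard comes from PREDICATE (when given) and the final
   assignment is produced by the omp_clause_assign_op langhook.  */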
2595
2596
2597 /* Generate code to implement the REDUCTION clauses. */
2598
2599 static void
2600 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2601 {
2602 gimple_seq sub_seq = NULL;
2603 gimple stmt;
2604 tree x, c;
2605 int count = 0;
2606
2607 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2608 update in that case, otherwise use a lock. */
2609 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2610 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2611 {
2612 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2613 {
2614 /* Never use OMP_ATOMIC for array reductions. */
2615 count = -1;
2616 break;
2617 }
2618 count++;
2619 }
2620
2621 if (count == 0)
2622 return;
2623
2624 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2625 {
2626 tree var, ref, new_var;
2627 enum tree_code code;
2628 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2629
2630 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2631 continue;
2632
2633 var = OMP_CLAUSE_DECL (c);
2634 new_var = lookup_decl (var, ctx);
2635 if (is_reference (var))
2636 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2637 ref = build_outer_var_ref (var, ctx);
2638 code = OMP_CLAUSE_REDUCTION_CODE (c);
2639
2640 /* reduction(-:var) sums up the partial results, so it acts
2641 identically to reduction(+:var). */
2642 if (code == MINUS_EXPR)
2643 code = PLUS_EXPR;
2644
2645 if (count == 1)
2646 {
2647 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2648
2649 addr = save_expr (addr);
2650 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2651 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2652 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2653 gimplify_and_add (x, stmt_seqp);
2654 return;
2655 }
2656
2657 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2658 {
2659 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2660
2661 if (is_reference (var))
2662 ref = build_fold_addr_expr_loc (clause_loc, ref);
2663 SET_DECL_VALUE_EXPR (placeholder, ref);
2664 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2665 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2666 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2667 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2668 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2669 }
2670 else
2671 {
2672 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2673 ref = build_outer_var_ref (var, ctx);
2674 gimplify_assign (ref, x, &sub_seq);
2675 }
2676 }
2677
2678 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2679 gimple_seq_add_stmt (stmt_seqp, stmt);
2680
2681 gimple_seq_add_seq (stmt_seqp, sub_seq);
2682
2683 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2684 gimple_seq_add_stmt (stmt_seqp, stmt);
2685 }
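
/* Schematically, for a single scalar reduction the code above emits an
   atomic update of the shared copy, roughly

	#pragma omp atomic
	  shared_var = shared_var OP private_var;

   while with several reductions, or a placeholder (array) reduction,
   the merges are instead wrapped in a lock:

	GOMP_atomic_start ();
	shared_var1 = shared_var1 OP1 private_var1;
	...
	GOMP_atomic_end ();  */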
2686
2687
2688 /* Generate code to implement the COPYPRIVATE clauses. */
2689
2690 static void
2691 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2692 omp_context *ctx)
2693 {
2694 tree c;
2695
2696 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2697 {
2698 tree var, new_var, ref, x;
2699 bool by_ref;
2700 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2701
2702 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2703 continue;
2704
2705 var = OMP_CLAUSE_DECL (c);
2706 by_ref = use_pointer_for_field (var, NULL);
2707
2708 ref = build_sender_ref (var, ctx);
2709 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2710 if (by_ref)
2711 {
2712 x = build_fold_addr_expr_loc (clause_loc, new_var);
2713 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2714 }
2715 gimplify_assign (ref, x, slist);
2716
2717 ref = build_receiver_ref (var, false, ctx);
2718 if (by_ref)
2719 {
2720 ref = fold_convert_loc (clause_loc,
2721 build_pointer_type (TREE_TYPE (new_var)),
2722 ref);
2723 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2724 }
2725 if (is_reference (var))
2726 {
2727 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2728 ref = build_simple_mem_ref_loc (clause_loc, ref);
2729 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2730 }
2731 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2732 gimplify_and_add (x, rlist);
2733 }
2734 }
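
/* Sketch of the two halves built above (field names are illustrative):

	SLIST (thread that executed the single region):
	  .omp_copy_s.x = x;           (or &x when passed by reference)

	RLIST (all other threads):
	  x = .omp_copy_s.x;           (via the omp_clause_assign_op langhook)  */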
2735
2736
2737 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2738 and REDUCTION from the sender (aka parent) side. */
2739
2740 static void
2741 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2742 omp_context *ctx)
2743 {
2744 tree c;
2745
2746 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2747 {
2748 tree val, ref, x, var;
2749 bool by_ref, do_in = false, do_out = false;
2750 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2751
2752 switch (OMP_CLAUSE_CODE (c))
2753 {
2754 case OMP_CLAUSE_PRIVATE:
2755 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2756 break;
2757 continue;
2758 case OMP_CLAUSE_FIRSTPRIVATE:
2759 case OMP_CLAUSE_COPYIN:
2760 case OMP_CLAUSE_LASTPRIVATE:
2761 case OMP_CLAUSE_REDUCTION:
2762 break;
2763 default:
2764 continue;
2765 }
2766
2767 val = OMP_CLAUSE_DECL (c);
2768 var = lookup_decl_in_outer_ctx (val, ctx);
2769
2770 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2771 && is_global_var (var))
2772 continue;
2773 if (is_variable_sized (val))
2774 continue;
2775 by_ref = use_pointer_for_field (val, NULL);
2776
2777 switch (OMP_CLAUSE_CODE (c))
2778 {
2779 case OMP_CLAUSE_PRIVATE:
2780 case OMP_CLAUSE_FIRSTPRIVATE:
2781 case OMP_CLAUSE_COPYIN:
2782 do_in = true;
2783 break;
2784
2785 case OMP_CLAUSE_LASTPRIVATE:
2786 if (by_ref || is_reference (val))
2787 {
2788 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2789 continue;
2790 do_in = true;
2791 }
2792 else
2793 {
2794 do_out = true;
2795 if (lang_hooks.decls.omp_private_outer_ref (val))
2796 do_in = true;
2797 }
2798 break;
2799
2800 case OMP_CLAUSE_REDUCTION:
2801 do_in = true;
2802 do_out = !(by_ref || is_reference (val));
2803 break;
2804
2805 default:
2806 gcc_unreachable ();
2807 }
2808
2809 if (do_in)
2810 {
2811 ref = build_sender_ref (val, ctx);
2812 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2813 gimplify_assign (ref, x, ilist);
2814 if (is_task_ctx (ctx))
2815 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2816 }
2817
2818 if (do_out)
2819 {
2820 ref = build_sender_ref (val, ctx);
2821 gimplify_assign (var, ref, olist);
2822 }
2823 }
2824 }
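
/* Roughly, for each clause handled above the sender side produces

	ILIST (before the region):  .omp_data_o.x = x;   (or &x when passed by reference)
	OLIST (after the region):   x = .omp_data_o.x;   (lastprivate/reduction write-back)

   according to the DO_IN / DO_OUT decisions computed for the clause.  */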
2825
2826 /* Generate code to implement SHARED from the sender (aka parent)
2827 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2828 list things that got automatically shared. */
2829
2830 static void
2831 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2832 {
2833 tree var, ovar, nvar, f, x, record_type;
2834
2835 if (ctx->record_type == NULL)
2836 return;
2837
2838 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2839 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
2840 {
2841 ovar = DECL_ABSTRACT_ORIGIN (f);
2842 nvar = maybe_lookup_decl (ovar, ctx);
2843 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2844 continue;
2845
2846       /* If CTX is a nested parallel directive, find the immediately
2847 enclosing parallel or workshare construct that contains a
2848 mapping for OVAR. */
2849 var = lookup_decl_in_outer_ctx (ovar, ctx);
2850
2851 if (use_pointer_for_field (ovar, ctx))
2852 {
2853 x = build_sender_ref (ovar, ctx);
2854 var = build_fold_addr_expr (var);
2855 gimplify_assign (x, var, ilist);
2856 }
2857 else
2858 {
2859 x = build_sender_ref (ovar, ctx);
2860 gimplify_assign (x, var, ilist);
2861
2862 if (!TREE_READONLY (var)
2863 /* We don't need to receive a new reference to a result
2864 or parm decl. In fact we may not store to it as we will
2865 invalidate any pending RSO and generate wrong gimple
2866 during inlining. */
2867 && !((TREE_CODE (var) == RESULT_DECL
2868 || TREE_CODE (var) == PARM_DECL)
2869 && DECL_BY_REFERENCE (var)))
2870 {
2871 x = build_sender_ref (ovar, ctx);
2872 gimplify_assign (var, x, olist);
2873 }
2874 }
2875 }
2876 }
2877
2878
2879 /* A convenience function to build an empty GIMPLE_COND with just the
2880 condition. */
2881
2882 static gimple
2883 gimple_build_cond_empty (tree cond)
2884 {
2885 enum tree_code pred_code;
2886 tree lhs, rhs;
2887
2888 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2889 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2890 }
2891
2892
2893 /* Build the function calls to GOMP_parallel_start etc to actually
2894 generate the parallel operation. REGION is the parallel region
2895    being expanded.  BB is the block where the code is to be inserted.
2896    WS_ARGS will be set if this is a call to a combined parallel+workshare
2897    construct; it contains the list of additional arguments needed by
2898 the workshare construct. */
2899
2900 static void
2901 expand_parallel_call (struct omp_region *region, basic_block bb,
2902 gimple entry_stmt, tree ws_args)
2903 {
2904 tree t, t1, t2, val, cond, c, clauses;
2905 gimple_stmt_iterator gsi;
2906 gimple stmt;
2907 int start_ix;
2908 location_t clause_loc;
2909
2910 clauses = gimple_omp_parallel_clauses (entry_stmt);
2911
2912 /* Determine what flavor of GOMP_parallel_start we will be
2913 emitting. */
2914 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2915 if (is_combined_parallel (region))
2916 {
2917 switch (region->inner->type)
2918 {
2919 case GIMPLE_OMP_FOR:
2920 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2921 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2922 + (region->inner->sched_kind
2923 == OMP_CLAUSE_SCHEDULE_RUNTIME
2924 ? 3 : region->inner->sched_kind);
2925 break;
2926 case GIMPLE_OMP_SECTIONS:
2927 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2928 break;
2929 default:
2930 gcc_unreachable ();
2931 }
2932 }
2933
2934 /* By default, the value of NUM_THREADS is zero (selected at run time)
2935 and there is no conditional. */
2936 cond = NULL_TREE;
2937 val = build_int_cst (unsigned_type_node, 0);
2938
2939 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2940 if (c)
2941 cond = OMP_CLAUSE_IF_EXPR (c);
2942
2943 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2944 if (c)
2945 {
2946 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2947 clause_loc = OMP_CLAUSE_LOCATION (c);
2948 }
2949 else
2950 clause_loc = gimple_location (entry_stmt);
2951
2952 /* Ensure 'val' is of the correct type. */
2953 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2954
2955 /* If we found the clause 'if (cond)', build either
2956 (cond != 0) or (cond ? val : 1u). */
2957 if (cond)
2958 {
2959 gimple_stmt_iterator gsi;
2960
2961 cond = gimple_boolify (cond);
2962
2963 if (integer_zerop (val))
2964 val = fold_build2_loc (clause_loc,
2965 EQ_EXPR, unsigned_type_node, cond,
2966 build_int_cst (TREE_TYPE (cond), 0));
2967 else
2968 {
2969 basic_block cond_bb, then_bb, else_bb;
2970 edge e, e_then, e_else;
2971 tree tmp_then, tmp_else, tmp_join, tmp_var;
2972
2973 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2974 if (gimple_in_ssa_p (cfun))
2975 {
2976 tmp_then = make_ssa_name (tmp_var, NULL);
2977 tmp_else = make_ssa_name (tmp_var, NULL);
2978 tmp_join = make_ssa_name (tmp_var, NULL);
2979 }
2980 else
2981 {
2982 tmp_then = tmp_var;
2983 tmp_else = tmp_var;
2984 tmp_join = tmp_var;
2985 }
2986
2987 e = split_block (bb, NULL);
2988 cond_bb = e->src;
2989 bb = e->dest;
2990 remove_edge (e);
2991
2992 then_bb = create_empty_bb (cond_bb);
2993 else_bb = create_empty_bb (then_bb);
2994 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
2995 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
2996
2997 stmt = gimple_build_cond_empty (cond);
2998 gsi = gsi_start_bb (cond_bb);
2999 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3000
3001 gsi = gsi_start_bb (then_bb);
3002 stmt = gimple_build_assign (tmp_then, val);
3003 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3004
3005 gsi = gsi_start_bb (else_bb);
3006 stmt = gimple_build_assign
3007 (tmp_else, build_int_cst (unsigned_type_node, 1));
3008 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3009
3010 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3011 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3012 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3013 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3014
3015 if (gimple_in_ssa_p (cfun))
3016 {
3017 gimple phi = create_phi_node (tmp_join, bb);
3018 SSA_NAME_DEF_STMT (tmp_join) = phi;
3019 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3020 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3021 }
3022
3023 val = tmp_join;
3024 }
3025
3026 gsi = gsi_start_bb (bb);
3027 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3028 false, GSI_CONTINUE_LINKING);
3029 }
3030
3031 gsi = gsi_last_bb (bb);
3032 t = gimple_omp_parallel_data_arg (entry_stmt);
3033 if (t == NULL)
3034 t1 = null_pointer_node;
3035 else
3036 t1 = build_fold_addr_expr (t);
3037 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3038
3039 if (ws_args)
3040 {
3041 tree args = tree_cons (NULL, t2,
3042 tree_cons (NULL, t1,
3043 tree_cons (NULL, val, ws_args)));
3044 t = build_function_call_expr (UNKNOWN_LOCATION,
3045 built_in_decls[start_ix], args);
3046 }
3047 else
3048 t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);
3049
3050 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3051 false, GSI_CONTINUE_LINKING);
3052
3053 t = gimple_omp_parallel_data_arg (entry_stmt);
3054 if (t == NULL)
3055 t = null_pointer_node;
3056 else
3057 t = build_fold_addr_expr (t);
3058 t = build_call_expr_loc (gimple_location (entry_stmt),
3059 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3060 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3061 false, GSI_CONTINUE_LINKING);
3062
3063 t = build_call_expr_loc (gimple_location (entry_stmt),
3064 built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3065 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3066 false, GSI_CONTINUE_LINKING);
3067 }
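
/* The net effect for a plain (non-combined) parallel region is a call
   sequence along the lines of

	GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
	child_fn (&.omp_data_o);
	GOMP_parallel_end ();

   where num_threads is 0 unless a num_threads clause (possibly guarded
   by an if clause) supplied a value, and where a combined
   parallel+workshare region uses one of the GOMP_parallel_loop_*_start
   or GOMP_parallel_sections_start entry points with WS_ARGS appended.  */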
3068
3069
3070 /* Build the function call to GOMP_task to actually
3071    generate the task operation.  BB is the block where the code is to be inserted. */
3072
3073 static void
3074 expand_task_call (basic_block bb, gimple entry_stmt)
3075 {
3076 tree t, t1, t2, t3, flags, cond, c, clauses;
3077 gimple_stmt_iterator gsi;
3078 location_t loc = gimple_location (entry_stmt);
3079
3080 clauses = gimple_omp_task_clauses (entry_stmt);
3081
3082 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3083 if (c)
3084 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3085 else
3086 cond = boolean_true_node;
3087
3088 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3089 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3090
3091 gsi = gsi_last_bb (bb);
3092 t = gimple_omp_task_data_arg (entry_stmt);
3093 if (t == NULL)
3094 t2 = null_pointer_node;
3095 else
3096 t2 = build_fold_addr_expr_loc (loc, t);
3097 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3098 t = gimple_omp_task_copy_fn (entry_stmt);
3099 if (t == NULL)
3100 t3 = null_pointer_node;
3101 else
3102 t3 = build_fold_addr_expr_loc (loc, t);
3103
3104 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3105 gimple_omp_task_arg_size (entry_stmt),
3106 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3107
3108 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3109 false, GSI_CONTINUE_LINKING);
3110 }
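
/* The generated call has the shape

	GOMP_task (child_fn, &.omp_data_o, copy_fn,
		   arg_size, arg_align, if_cond, flags);

   with null pointers standing in for a missing data block or copy
   function, if_cond defaulting to true, and bit 0 of flags recording
   the presence of an untied clause.  */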
3111
3112
3113 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3114 catch handler and return it. This prevents programs from violating the
3115 structured block semantics with throws. */
3116
3117 static gimple_seq
3118 maybe_catch_exception (gimple_seq body)
3119 {
3120 gimple g;
3121 tree decl;
3122
3123 if (!flag_exceptions)
3124 return body;
3125
3126 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3127 decl = lang_hooks.eh_protect_cleanup_actions ();
3128 else
3129 decl = built_in_decls[BUILT_IN_TRAP];
3130
3131 g = gimple_build_eh_must_not_throw (decl);
3132 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3133 GIMPLE_TRY_CATCH);
3134
3135 return gimple_seq_alloc_with_stmt (g);
3136 }
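
/* In outline, the body is rewritten as

	try
	  {
	    BODY
	  }
	catch
	  <MUST_NOT_THROW region whose failure action is either the
	   language's cleanup handler or __builtin_trap ()>

   so that an exception escaping the structured block terminates the
   program instead of unwinding past the OpenMP region.  */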
3137
3138 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
3139
3140 static tree
3141 list2chain (tree list)
3142 {
3143 tree t;
3144
3145 for (t = list; t; t = TREE_CHAIN (t))
3146 {
3147 tree var = TREE_VALUE (t);
3148 if (TREE_CHAIN (t))
3149 TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
3150 else
3151 TREE_CHAIN (var) = NULL_TREE;
3152 }
3153
3154 return list ? TREE_VALUE (list) : NULL_TREE;
3155 }
3156
3157
3158 /* Remove barriers in REGION->EXIT's block. Note that this is only
3159 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3160    is an implicit barrier, any barrier that a workshare inside the
3161    GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL region
3162    can now be removed. */
3163
3164 static void
3165 remove_exit_barrier (struct omp_region *region)
3166 {
3167 gimple_stmt_iterator gsi;
3168 basic_block exit_bb;
3169 edge_iterator ei;
3170 edge e;
3171 gimple stmt;
3172 int any_addressable_vars = -1;
3173
3174 exit_bb = region->exit;
3175
3176 /* If the parallel region doesn't return, we don't have REGION->EXIT
3177 block at all. */
3178 if (! exit_bb)
3179 return;
3180
3181 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3182 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3183 statements that can appear in between are extremely limited -- no
3184 memory operations at all. Here, we allow nothing at all, so the
3185 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3186 gsi = gsi_last_bb (exit_bb);
3187 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3188 gsi_prev (&gsi);
3189 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3190 return;
3191
3192 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3193 {
3194 gsi = gsi_last_bb (e->src);
3195 if (gsi_end_p (gsi))
3196 continue;
3197 stmt = gsi_stmt (gsi);
3198 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3199 && !gimple_omp_return_nowait_p (stmt))
3200 {
3201 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3202 in many cases. If there could be tasks queued, the barrier
3203 might be needed to let the tasks run before some local
3204 variable of the parallel that the task uses as shared
3205 runs out of scope. The task can be spawned either
3206 	     from within the current function (this would be easy to check)
3207 or from some function it calls and gets passed an address
3208 of such a variable. */
3209 if (any_addressable_vars < 0)
3210 {
3211 gimple parallel_stmt = last_stmt (region->entry);
3212 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3213 tree local_decls = DECL_STRUCT_FUNCTION (child_fun)->local_decls;
3214 tree block;
3215
3216 any_addressable_vars = 0;
3217 for (; local_decls; local_decls = TREE_CHAIN (local_decls))
3218 if (TREE_ADDRESSABLE (TREE_VALUE (local_decls)))
3219 {
3220 any_addressable_vars = 1;
3221 break;
3222 }
3223 for (block = gimple_block (stmt);
3224 !any_addressable_vars
3225 && block
3226 && TREE_CODE (block) == BLOCK;
3227 block = BLOCK_SUPERCONTEXT (block))
3228 {
3229 for (local_decls = BLOCK_VARS (block);
3230 local_decls;
3231 local_decls = TREE_CHAIN (local_decls))
3232 if (TREE_ADDRESSABLE (local_decls))
3233 {
3234 any_addressable_vars = 1;
3235 break;
3236 }
3237 if (block == gimple_block (parallel_stmt))
3238 break;
3239 }
3240 }
3241 if (!any_addressable_vars)
3242 gimple_omp_return_set_nowait (stmt);
3243 }
3244 }
3245 }
3246
3247 static void
3248 remove_exit_barriers (struct omp_region *region)
3249 {
3250 if (region->type == GIMPLE_OMP_PARALLEL)
3251 remove_exit_barrier (region);
3252
3253 if (region->inner)
3254 {
3255 region = region->inner;
3256 remove_exit_barriers (region);
3257 while (region->next)
3258 {
3259 region = region->next;
3260 remove_exit_barriers (region);
3261 }
3262 }
3263 }
3264
3265 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3266 calls. These can't be declared as const functions, but
3267 within one parallel body they are constant, so they can be
3268 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3269    which are declared const.  Similarly for a task body, except
3270    that in an untied task omp_get_thread_num () can change at any task
3271    scheduling point.
3272
3273 static void
3274 optimize_omp_library_calls (gimple entry_stmt)
3275 {
3276 basic_block bb;
3277 gimple_stmt_iterator gsi;
3278 tree thr_num_id
3279 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3280 tree num_thr_id
3281 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3282 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3283 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3284 OMP_CLAUSE_UNTIED) != NULL);
3285
3286 FOR_EACH_BB (bb)
3287 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3288 {
3289 gimple call = gsi_stmt (gsi);
3290 tree decl;
3291
3292 if (is_gimple_call (call)
3293 && (decl = gimple_call_fndecl (call))
3294 && DECL_EXTERNAL (decl)
3295 && TREE_PUBLIC (decl)
3296 && DECL_INITIAL (decl) == NULL)
3297 {
3298 tree built_in;
3299
3300 if (DECL_NAME (decl) == thr_num_id)
3301 {
3302 /* In #pragma omp task untied omp_get_thread_num () can change
3303 during the execution of the task region. */
3304 if (untied_task)
3305 continue;
3306 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3307 }
3308 else if (DECL_NAME (decl) == num_thr_id)
3309 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3310 else
3311 continue;
3312
3313 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3314 || gimple_call_num_args (call) != 0)
3315 continue;
3316
3317 if (flag_exceptions && !TREE_NOTHROW (decl))
3318 continue;
3319
3320 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3321 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3322 TREE_TYPE (TREE_TYPE (built_in))))
3323 continue;
3324
3325 gimple_call_set_fndecl (call, built_in);
3326 }
3327 }
3328 }
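
/* For example, a call to omp_get_num_threads () found in the outlined
   body is redirected to __builtin_omp_get_num_threads (), which is
   declared const and can therefore be optimized within the region.  */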
3329
3330 /* Expand the OpenMP parallel or task directive starting at REGION. */
3331
3332 static void
3333 expand_omp_taskreg (struct omp_region *region)
3334 {
3335 basic_block entry_bb, exit_bb, new_bb;
3336 struct function *child_cfun;
3337 tree child_fn, block, t, ws_args, *tp;
3338 tree save_current;
3339 gimple_stmt_iterator gsi;
3340 gimple entry_stmt, stmt;
3341 edge e;
3342
3343 entry_stmt = last_stmt (region->entry);
3344 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3345 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3346   /* If this function has already been instrumented, make sure
3347 the child function isn't instrumented again. */
3348 child_cfun->after_tree_profile = cfun->after_tree_profile;
3349
3350 entry_bb = region->entry;
3351 exit_bb = region->exit;
3352
3353 if (is_combined_parallel (region))
3354 ws_args = region->ws_args;
3355 else
3356 ws_args = NULL_TREE;
3357
3358 if (child_cfun->cfg)
3359 {
3360 /* Due to inlining, it may happen that we have already outlined
3361 the region, in which case all we need to do is make the
3362 sub-graph unreachable and emit the parallel call. */
3363 edge entry_succ_e, exit_succ_e;
3364 gimple_stmt_iterator gsi;
3365
3366 entry_succ_e = single_succ_edge (entry_bb);
3367
3368 gsi = gsi_last_bb (entry_bb);
3369 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3370 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3371 gsi_remove (&gsi, true);
3372
3373 new_bb = entry_bb;
3374 if (exit_bb)
3375 {
3376 exit_succ_e = single_succ_edge (exit_bb);
3377 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3378 }
3379 remove_edge_and_dominated_blocks (entry_succ_e);
3380 }
3381 else
3382 {
3383 /* If the parallel region needs data sent from the parent
3384 function, then the very first statement (except possible
3385 tree profile counter updates) of the parallel body
3386 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3387 &.OMP_DATA_O is passed as an argument to the child function,
3388 we need to replace it with the argument as seen by the child
3389 function.
3390
3391 In most cases, this will end up being the identity assignment
3392 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3393 a function call that has been inlined, the original PARM_DECL
3394 .OMP_DATA_I may have been converted into a different local
3395      variable, in which case we need to keep the assignment.  */
3396 if (gimple_omp_taskreg_data_arg (entry_stmt))
3397 {
3398 basic_block entry_succ_bb = single_succ (entry_bb);
3399 gimple_stmt_iterator gsi;
3400 tree arg, narg;
3401 gimple parcopy_stmt = NULL;
3402
3403 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3404 {
3405 gimple stmt;
3406
3407 gcc_assert (!gsi_end_p (gsi));
3408 stmt = gsi_stmt (gsi);
3409 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3410 continue;
3411
3412 if (gimple_num_ops (stmt) == 2)
3413 {
3414 tree arg = gimple_assign_rhs1 (stmt);
3415
3416 		  /* We ignore the subcode because we are
3417 		     effectively doing a STRIP_NOPS here.  */
3418
3419 if (TREE_CODE (arg) == ADDR_EXPR
3420 && TREE_OPERAND (arg, 0)
3421 == gimple_omp_taskreg_data_arg (entry_stmt))
3422 {
3423 parcopy_stmt = stmt;
3424 break;
3425 }
3426 }
3427 }
3428
3429 gcc_assert (parcopy_stmt != NULL);
3430 arg = DECL_ARGUMENTS (child_fn);
3431
3432 if (!gimple_in_ssa_p (cfun))
3433 {
3434 if (gimple_assign_lhs (parcopy_stmt) == arg)
3435 gsi_remove (&gsi, true);
3436 else
3437 {
3438 /* ?? Is setting the subcode really necessary ?? */
3439 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3440 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3441 }
3442 }
3443 else
3444 {
3445 /* If we are in ssa form, we must load the value from the default
3446 		 definition of the argument.  That default definition should not
3447 		 exist yet, since the argument is never used uninitialized.  */
3448 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3449 narg = make_ssa_name (arg, gimple_build_nop ());
3450 set_default_def (arg, narg);
3451 /* ?? Is setting the subcode really necessary ?? */
3452 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3453 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3454 update_stmt (parcopy_stmt);
3455 }
3456 }
3457
3458 /* Declare local variables needed in CHILD_CFUN. */
3459 block = DECL_INITIAL (child_fn);
3460 BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
3461 /* The gimplifier could record temporaries in parallel/task block
3462 	 rather than in the containing function's local_decls chain,
3463 which would mean cgraph missed finalizing them. Do it now. */
3464 for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
3465 if (TREE_CODE (t) == VAR_DECL
3466 && TREE_STATIC (t)
3467 && !DECL_EXTERNAL (t))
3468 varpool_finalize_decl (t);
3469 DECL_SAVED_TREE (child_fn) = NULL;
3470 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3471 TREE_USED (block) = 1;
3472
3473 /* Reset DECL_CONTEXT on function arguments. */
3474 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
3475 DECL_CONTEXT (t) = child_fn;
3476
3477 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3478 so that it can be moved to the child function. */
3479 gsi = gsi_last_bb (entry_bb);
3480 stmt = gsi_stmt (gsi);
3481 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3482 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3483 gsi_remove (&gsi, true);
3484 e = split_block (entry_bb, stmt);
3485 entry_bb = e->dest;
3486 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3487
3488 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3489 if (exit_bb)
3490 {
3491 gsi = gsi_last_bb (exit_bb);
3492 gcc_assert (!gsi_end_p (gsi)
3493 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3494 stmt = gimple_build_return (NULL);
3495 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3496 gsi_remove (&gsi, true);
3497 }
3498
3499 /* Move the parallel region into CHILD_CFUN. */
3500
3501 if (gimple_in_ssa_p (cfun))
3502 {
3503 push_cfun (child_cfun);
3504 init_tree_ssa (child_cfun);
3505 init_ssa_operands ();
3506 cfun->gimple_df->in_ssa_p = true;
3507 pop_cfun ();
3508 block = NULL_TREE;
3509 }
3510 else
3511 block = gimple_block (entry_stmt);
3512
3513 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3514 if (exit_bb)
3515 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3516
3517 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3518 for (tp = &child_cfun->local_decls; *tp; )
3519 if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
3520 tp = &TREE_CHAIN (*tp);
3521 else
3522 *tp = TREE_CHAIN (*tp);
3523
3524 /* Inform the callgraph about the new function. */
3525 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3526 = cfun->curr_properties;
3527 cgraph_add_new_function (child_fn, true);
3528
3529 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3530 fixed in a following pass. */
3531 push_cfun (child_cfun);
3532 save_current = current_function_decl;
3533 current_function_decl = child_fn;
3534 if (optimize)
3535 optimize_omp_library_calls (entry_stmt);
3536 rebuild_cgraph_edges ();
3537
3538 /* Some EH regions might become dead, see PR34608. If
3539 pass_cleanup_cfg isn't the first pass to happen with the
3540 new child, these dead EH edges might cause problems.
3541 Clean them up now. */
3542 if (flag_exceptions)
3543 {
3544 basic_block bb;
3545 bool changed = false;
3546
3547 FOR_EACH_BB (bb)
3548 changed |= gimple_purge_dead_eh_edges (bb);
3549 if (changed)
3550 cleanup_tree_cfg ();
3551 }
3552 if (gimple_in_ssa_p (cfun))
3553 update_ssa (TODO_update_ssa);
3554 current_function_decl = save_current;
3555 pop_cfun ();
3556 }
3557
3558 /* Emit a library call to launch the children threads. */
3559 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3560 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3561 else
3562 expand_task_call (new_bb, entry_stmt);
3563 update_ssa (TODO_update_ssa_only_virtuals);
3564 }
3565
3566
3567 /* A subroutine of expand_omp_for. Generate code for a parallel
3568 loop with any schedule. Given parameters:
3569
3570 for (V = N1; V cond N2; V += STEP) BODY;
3571
3572 where COND is "<" or ">", we generate pseudocode
3573
3574 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3575 if (more) goto L0; else goto L3;
3576 L0:
3577 V = istart0;
3578 iend = iend0;
3579 L1:
3580 BODY;
3581 V += STEP;
3582 if (V cond iend) goto L1; else goto L2;
3583 L2:
3584 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3585 L3:
3586
3587 If this is a combined omp parallel loop, instead of the call to
3588 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3589
3590 For collapsed loops, given parameters:
3591 collapse(3)
3592 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3593 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3594 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3595 BODY;
3596
3597 we generate pseudocode
3598
3599 if (cond3 is <)
3600 adj = STEP3 - 1;
3601 else
3602 adj = STEP3 + 1;
3603 count3 = (adj + N32 - N31) / STEP3;
3604 if (cond2 is <)
3605 adj = STEP2 - 1;
3606 else
3607 adj = STEP2 + 1;
3608 count2 = (adj + N22 - N21) / STEP2;
3609 if (cond1 is <)
3610 adj = STEP1 - 1;
3611 else
3612 adj = STEP1 + 1;
3613 count1 = (adj + N12 - N11) / STEP1;
3614 count = count1 * count2 * count3;
3615 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3616 if (more) goto L0; else goto L3;
3617 L0:
3618 V = istart0;
3619 T = V;
3620 V3 = N31 + (T % count3) * STEP3;
3621 T = T / count3;
3622 V2 = N21 + (T % count2) * STEP2;
3623 T = T / count2;
3624 V1 = N11 + T * STEP1;
3625 iend = iend0;
3626 L1:
3627 BODY;
3628 V += 1;
3629 if (V < iend) goto L10; else goto L2;
3630 L10:
3631 V3 += STEP3;
3632 if (V3 cond3 N32) goto L1; else goto L11;
3633 L11:
3634 V3 = N31;
3635 V2 += STEP2;
3636 if (V2 cond2 N22) goto L1; else goto L12;
3637 L12:
3638 V2 = N21;
3639 V1 += STEP1;
3640 goto L1;
3641 L2:
3642 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3643 L3:
3644
3645 */
3646
3647 static void
3648 expand_omp_for_generic (struct omp_region *region,
3649 struct omp_for_data *fd,
3650 enum built_in_function start_fn,
3651 enum built_in_function next_fn)
3652 {
3653 tree type, istart0, iend0, iend;
3654 tree t, vmain, vback, bias = NULL_TREE;
3655 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3656 basic_block l2_bb = NULL, l3_bb = NULL;
3657 gimple_stmt_iterator gsi;
3658 gimple stmt;
3659 bool in_combined_parallel = is_combined_parallel (region);
3660 bool broken_loop = region->cont == NULL;
3661 edge e, ne;
3662 tree *counts = NULL;
3663 int i;
3664
3665 gcc_assert (!broken_loop || !in_combined_parallel);
3666 gcc_assert (fd->iter_type == long_integer_type_node
3667 || !in_combined_parallel);
3668
3669 type = TREE_TYPE (fd->loop.v);
3670 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3671 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3672 TREE_ADDRESSABLE (istart0) = 1;
3673 TREE_ADDRESSABLE (iend0) = 1;
3674 if (gimple_in_ssa_p (cfun))
3675 {
3676 add_referenced_var (istart0);
3677 add_referenced_var (iend0);
3678 }
3679
3680 /* See if we need to bias by LLONG_MIN. */
3681 if (fd->iter_type == long_long_unsigned_type_node
3682 && TREE_CODE (type) == INTEGER_TYPE
3683 && !TYPE_UNSIGNED (type))
3684 {
3685 tree n1, n2;
3686
3687 if (fd->loop.cond_code == LT_EXPR)
3688 {
3689 n1 = fd->loop.n1;
3690 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3691 }
3692 else
3693 {
3694 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3695 n2 = fd->loop.n1;
3696 }
3697 if (TREE_CODE (n1) != INTEGER_CST
3698 || TREE_CODE (n2) != INTEGER_CST
3699 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3700 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3701 }
3702
3703 entry_bb = region->entry;
3704 cont_bb = region->cont;
3705 collapse_bb = NULL;
3706 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3707 gcc_assert (broken_loop
3708 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3709 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3710 l1_bb = single_succ (l0_bb);
3711 if (!broken_loop)
3712 {
3713 l2_bb = create_empty_bb (cont_bb);
3714 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3715 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3716 }
3717 else
3718 l2_bb = NULL;
3719 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3720 exit_bb = region->exit;
3721
3722 gsi = gsi_last_bb (entry_bb);
3723
3724 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3725 if (fd->collapse > 1)
3726 {
3727 /* collapsed loops need work for expansion in SSA form. */
3728 gcc_assert (!gimple_in_ssa_p (cfun));
3729 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3730 for (i = 0; i < fd->collapse; i++)
3731 {
3732 tree itype = TREE_TYPE (fd->loops[i].v);
3733
3734 if (POINTER_TYPE_P (itype))
3735 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3736 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3737 ? -1 : 1));
3738 t = fold_build2 (PLUS_EXPR, itype,
3739 fold_convert (itype, fd->loops[i].step), t);
3740 t = fold_build2 (PLUS_EXPR, itype, t,
3741 fold_convert (itype, fd->loops[i].n2));
3742 t = fold_build2 (MINUS_EXPR, itype, t,
3743 fold_convert (itype, fd->loops[i].n1));
3744 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3745 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3746 fold_build1 (NEGATE_EXPR, itype, t),
3747 fold_build1 (NEGATE_EXPR, itype,
3748 fold_convert (itype,
3749 fd->loops[i].step)));
3750 else
3751 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3752 fold_convert (itype, fd->loops[i].step));
3753 t = fold_convert (type, t);
3754 if (TREE_CODE (t) == INTEGER_CST)
3755 counts[i] = t;
3756 else
3757 {
3758 counts[i] = create_tmp_var (type, ".count");
3759 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3760 true, GSI_SAME_STMT);
3761 stmt = gimple_build_assign (counts[i], t);
3762 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3763 }
3764 if (SSA_VAR_P (fd->loop.n2))
3765 {
3766 if (i == 0)
3767 t = counts[0];
3768 else
3769 {
3770 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3771 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3772 true, GSI_SAME_STMT);
3773 }
3774 stmt = gimple_build_assign (fd->loop.n2, t);
3775 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3776 }
3777 }
3778 }
3779 if (in_combined_parallel)
3780 {
3781 /* In a combined parallel loop, emit a call to
3782 GOMP_loop_foo_next. */
3783 t = build_call_expr (built_in_decls[next_fn], 2,
3784 build_fold_addr_expr (istart0),
3785 build_fold_addr_expr (iend0));
3786 }
3787 else
3788 {
3789 tree t0, t1, t2, t3, t4;
3790 /* If this is not a combined parallel loop, emit a call to
3791 GOMP_loop_foo_start in ENTRY_BB. */
3792 t4 = build_fold_addr_expr (iend0);
3793 t3 = build_fold_addr_expr (istart0);
3794 t2 = fold_convert (fd->iter_type, fd->loop.step);
3795 if (POINTER_TYPE_P (type)
3796 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3797 {
3798 /* Avoid casting pointers to an integer of a different size. */
3799 tree itype
3800 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3801 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3802 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3803 }
3804 else
3805 {
3806 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3807 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3808 }
3809 if (bias)
3810 {
3811 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3812 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3813 }
3814 if (fd->iter_type == long_integer_type_node)
3815 {
3816 if (fd->chunk_size)
3817 {
3818 t = fold_convert (fd->iter_type, fd->chunk_size);
3819 t = build_call_expr (built_in_decls[start_fn], 6,
3820 t0, t1, t2, t, t3, t4);
3821 }
3822 else
3823 t = build_call_expr (built_in_decls[start_fn], 5,
3824 t0, t1, t2, t3, t4);
3825 }
3826 else
3827 {
3828 tree t5;
3829 tree c_bool_type;
3830
3831 /* The GOMP_loop_ull_*start functions take an additional boolean
3832 argument, true for < loops and false for > loops.
3833 In Fortran, the C bool type can be different from
3834 boolean_type_node. */
3835 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3836 t5 = build_int_cst (c_bool_type,
3837 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3838 if (fd->chunk_size)
3839 {
3840 t = fold_convert (fd->iter_type, fd->chunk_size);
3841 t = build_call_expr (built_in_decls[start_fn], 7,
3842 t5, t0, t1, t2, t, t3, t4);
3843 }
3844 else
3845 t = build_call_expr (built_in_decls[start_fn], 6,
3846 t5, t0, t1, t2, t3, t4);
3847 }
3848 }
3849 if (TREE_TYPE (t) != boolean_type_node)
3850 t = fold_build2 (NE_EXPR, boolean_type_node,
3851 t, build_int_cst (TREE_TYPE (t), 0));
3852 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3853 true, GSI_SAME_STMT);
3854 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3855
3856 /* Remove the GIMPLE_OMP_FOR statement. */
3857 gsi_remove (&gsi, true);
3858
3859 /* Iteration setup for sequential loop goes in L0_BB. */
3860 gsi = gsi_start_bb (l0_bb);
3861 t = istart0;
3862 if (bias)
3863 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3864 if (POINTER_TYPE_P (type))
3865 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3866 0), t);
3867 t = fold_convert (type, t);
3868 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3869 false, GSI_CONTINUE_LINKING);
3870 stmt = gimple_build_assign (fd->loop.v, t);
3871 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3872
3873 t = iend0;
3874 if (bias)
3875 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3876 if (POINTER_TYPE_P (type))
3877 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3878 0), t);
3879 t = fold_convert (type, t);
3880 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3881 false, GSI_CONTINUE_LINKING);
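/* For a collapsed loop nest the runtime hands back a single linear
   iteration number in fd->loop.v; recover the individual control
   variables by repeated modulo/division with the per-loop trip counts
   computed earlier in ENTRY_BB, innermost loop first.  */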
3882 if (fd->collapse > 1)
3883 {
3884 tree tem = create_tmp_var (type, ".tem");
3885
3886 stmt = gimple_build_assign (tem, fd->loop.v);
3887 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3888 for (i = fd->collapse - 1; i >= 0; i--)
3889 {
3890 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3891 itype = vtype;
3892 if (POINTER_TYPE_P (vtype))
3893 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3894 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3895 t = fold_convert (itype, t);
3896 t = fold_build2 (MULT_EXPR, itype, t,
3897 fold_convert (itype, fd->loops[i].step));
3898 if (POINTER_TYPE_P (vtype))
3899 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3900 fd->loops[i].n1, fold_convert (sizetype, t));
3901 else
3902 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3903 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3904 false, GSI_CONTINUE_LINKING);
3905 stmt = gimple_build_assign (fd->loops[i].v, t);
3906 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3907 if (i != 0)
3908 {
3909 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3910 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3911 false, GSI_CONTINUE_LINKING);
3912 stmt = gimple_build_assign (tem, t);
3913 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3914 }
3915 }
3916 }
3917
3918 if (!broken_loop)
3919 {
3920 /* Code to control the increment and predicate for the sequential
3921 loop goes in the CONT_BB. */
3922 gsi = gsi_last_bb (cont_bb);
3923 stmt = gsi_stmt (gsi);
3924 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3925 vmain = gimple_omp_continue_control_use (stmt);
3926 vback = gimple_omp_continue_control_def (stmt);
3927
3928 if (POINTER_TYPE_P (type))
3929 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3930 fold_convert (sizetype, fd->loop.step));
3931 else
3932 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3933 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3934 true, GSI_SAME_STMT);
3935 stmt = gimple_build_assign (vback, t);
3936 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3937
3938 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3939 stmt = gimple_build_cond_empty (t);
3940 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3941
3942 /* Remove GIMPLE_OMP_CONTINUE. */
3943 gsi_remove (&gsi, true);
3944
3945 if (fd->collapse > 1)
3946 {
3947 basic_block last_bb, bb;
3948
3949 last_bb = cont_bb;
3950 for (i = fd->collapse - 1; i >= 0; i--)
3951 {
3952 tree vtype = TREE_TYPE (fd->loops[i].v);
3953
3954 bb = create_empty_bb (last_bb);
3955 gsi = gsi_start_bb (bb);
3956
3957 if (i < fd->collapse - 1)
3958 {
3959 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3960 e->probability = REG_BR_PROB_BASE / 8;
3961
3962 t = fd->loops[i + 1].n1;
3963 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3964 false, GSI_CONTINUE_LINKING);
3965 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3966 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3967 }
3968 else
3969 collapse_bb = bb;
3970
3971 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3972
3973 if (POINTER_TYPE_P (vtype))
3974 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3975 fd->loops[i].v,
3976 fold_convert (sizetype, fd->loops[i].step));
3977 else
3978 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3979 fd->loops[i].step);
3980 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3981 false, GSI_CONTINUE_LINKING);
3982 stmt = gimple_build_assign (fd->loops[i].v, t);
3983 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3984
3985 if (i > 0)
3986 {
3987 t = fd->loops[i].n2;
3988 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3989 false, GSI_CONTINUE_LINKING);
3990 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
3991 fd->loops[i].v, t);
3992 stmt = gimple_build_cond_empty (t);
3993 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3994 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
3995 e->probability = REG_BR_PROB_BASE * 7 / 8;
3996 }
3997 else
3998 make_edge (bb, l1_bb, EDGE_FALLTHRU);
3999 last_bb = bb;
4000 }
4001 }
4002
4003 /* Emit code to get the next parallel iteration in L2_BB. */
4004 gsi = gsi_start_bb (l2_bb);
4005
4006 t = build_call_expr (built_in_decls[next_fn], 2,
4007 build_fold_addr_expr (istart0),
4008 build_fold_addr_expr (iend0));
4009 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4010 false, GSI_CONTINUE_LINKING);
4011 if (TREE_TYPE (t) != boolean_type_node)
4012 t = fold_build2 (NE_EXPR, boolean_type_node,
4013 t, build_int_cst (TREE_TYPE (t), 0));
4014 stmt = gimple_build_cond_empty (t);
4015 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4016 }
4017
4018 /* Add the loop cleanup function. */
4019 gsi = gsi_last_bb (exit_bb);
4020 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4021 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
4022 else
4023 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
4024 stmt = gimple_build_call (t, 0);
4025 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4026 gsi_remove (&gsi, true);
4027
4028 /* Connect the new blocks. */
4029 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4030 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4031
4032 if (!broken_loop)
4033 {
4034 gimple_seq phis;
4035
4036 e = find_edge (cont_bb, l3_bb);
4037 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4038
4039 phis = phi_nodes (l3_bb);
4040 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4041 {
4042 gimple phi = gsi_stmt (gsi);
4043 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4044 PHI_ARG_DEF_FROM_EDGE (phi, e));
4045 }
4046 remove_edge (e);
4047
4048 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4049 if (fd->collapse > 1)
4050 {
4051 e = find_edge (cont_bb, l1_bb);
4052 remove_edge (e);
4053 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4054 }
4055 else
4056 {
4057 e = find_edge (cont_bb, l1_bb);
4058 e->flags = EDGE_TRUE_VALUE;
4059 }
4060 e->probability = REG_BR_PROB_BASE * 7 / 8;
4061 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4062 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4063
4064 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4065 recompute_dominator (CDI_DOMINATORS, l2_bb));
4066 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4067 recompute_dominator (CDI_DOMINATORS, l3_bb));
4068 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4069 recompute_dominator (CDI_DOMINATORS, l0_bb));
4070 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4071 recompute_dominator (CDI_DOMINATORS, l1_bb));
4072 }
4073 }
4074
4075
4076 /* A subroutine of expand_omp_for. Generate code for a parallel
4077 loop with static schedule and no specified chunk size. Given
4078 parameters:
4079
4080 for (V = N1; V cond N2; V += STEP) BODY;
4081
4082 where COND is "<" or ">", we generate pseudocode
4083
4084 if (cond is <)
4085 adj = STEP - 1;
4086 else
4087 adj = STEP + 1;
4088 if ((__typeof (V)) -1 > 0 && cond is >)
4089 n = -(adj + N2 - N1) / -STEP;
4090 else
4091 n = (adj + N2 - N1) / STEP;
4092 q = n / nthreads;
4093 q += (q * nthreads != n);
4094 s0 = q * threadid;
4095 e0 = min(s0 + q, n);
4096 V = s0 * STEP + N1;
4097 if (s0 >= e0) goto L2; else goto L0;
4098 L0:
4099 e = e0 * STEP + N1;
4100 L1:
4101 BODY;
4102 V += STEP;
4103 if (V cond e) goto L1;
4104 L2:
4105 */
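/* Illustrative sketch, not part of the pass: the partitioning above
   written out directly in C for a "<" loop with a positive STEP.
   NTHREADS and THREADID stand in for omp_get_num_threads () /
   omp_get_thread_num ().  Thread THREADID ends up running the logical
   iterations [*s0, *e0), i.e. V from N1 + *s0 * STEP up to (but not
   including) N1 + *e0 * STEP; if *s0 >= *e0 it runs nothing.  */

static void
static_nochunk_bounds_example (long n1, long n2, long step,
                               long nthreads, long threadid,
                               long *s0, long *e0)
{
  long n = (step - 1 + n2 - n1) / step;   /* total iteration count */
  long q = n / nthreads;                  /* block size, rounded up */

  q += (q * nthreads != n);
  *s0 = q * threadid;
  *e0 = *s0 + q;
  if (*e0 > n)
    *e0 = n;
}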
4106
4107 static void
4108 expand_omp_for_static_nochunk (struct omp_region *region,
4109 struct omp_for_data *fd)
4110 {
4111 tree n, q, s0, e0, e, t, nthreads, threadid;
4112 tree type, itype, vmain, vback;
4113 basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
4114 basic_block fin_bb;
4115 gimple_stmt_iterator gsi;
4116 gimple stmt;
4117
4118 itype = type = TREE_TYPE (fd->loop.v);
4119 if (POINTER_TYPE_P (type))
4120 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4121
4122 entry_bb = region->entry;
4123 cont_bb = region->cont;
4124 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4125 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4126 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4127 body_bb = single_succ (seq_start_bb);
4128 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4129 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4130 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4131 exit_bb = region->exit;
4132
4133 /* Iteration space partitioning goes in ENTRY_BB. */
4134 gsi = gsi_last_bb (entry_bb);
4135 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4136
4137 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4138 t = fold_convert (itype, t);
4139 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4140 true, GSI_SAME_STMT);
4141
4142 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4143 t = fold_convert (itype, t);
4144 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4145 true, GSI_SAME_STMT);
4146
4147 fd->loop.n1
4148 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4149 true, NULL_TREE, true, GSI_SAME_STMT);
4150 fd->loop.n2
4151 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4152 true, NULL_TREE, true, GSI_SAME_STMT);
4153 fd->loop.step
4154 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4155 true, NULL_TREE, true, GSI_SAME_STMT);
4156
4157 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4158 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4159 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4160 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4161 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4162 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4163 fold_build1 (NEGATE_EXPR, itype, t),
4164 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4165 else
4166 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4167 t = fold_convert (itype, t);
4168 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4169
4170 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4171 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4172
4173 t = fold_build2 (MULT_EXPR, itype, q, nthreads);
4174 t = fold_build2 (NE_EXPR, itype, t, n);
4175 t = fold_build2 (PLUS_EXPR, itype, q, t);
4176 q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4177
4178 t = build2 (MULT_EXPR, itype, q, threadid);
4179 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4180
4181 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4182 t = fold_build2 (MIN_EXPR, itype, t, n);
4183 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4184
4185 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4186 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4187
4188 /* Remove the GIMPLE_OMP_FOR statement. */
4189 gsi_remove (&gsi, true);
4190
4191 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4192 gsi = gsi_start_bb (seq_start_bb);
4193
4194 t = fold_convert (itype, s0);
4195 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4196 if (POINTER_TYPE_P (type))
4197 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4198 fold_convert (sizetype, t));
4199 else
4200 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4201 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4202 false, GSI_CONTINUE_LINKING);
4203 stmt = gimple_build_assign (fd->loop.v, t);
4204 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4205
4206 t = fold_convert (itype, e0);
4207 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4208 if (POINTER_TYPE_P (type))
4209 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4210 fold_convert (sizetype, t));
4211 else
4212 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4213 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4214 false, GSI_CONTINUE_LINKING);
4215
4216 /* The code controlling the sequential loop replaces the
4217 GIMPLE_OMP_CONTINUE. */
4218 gsi = gsi_last_bb (cont_bb);
4219 stmt = gsi_stmt (gsi);
4220 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4221 vmain = gimple_omp_continue_control_use (stmt);
4222 vback = gimple_omp_continue_control_def (stmt);
4223
4224 if (POINTER_TYPE_P (type))
4225 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4226 fold_convert (sizetype, fd->loop.step));
4227 else
4228 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4229 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4230 true, GSI_SAME_STMT);
4231 stmt = gimple_build_assign (vback, t);
4232 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4233
4234 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4235 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4236
4237 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4238 gsi_remove (&gsi, true);
4239
4240 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4241 gsi = gsi_last_bb (exit_bb);
4242 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4243 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4244 false, GSI_SAME_STMT);
4245 gsi_remove (&gsi, true);
4246
4247 /* Connect all the blocks. */
4248 find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4249 find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4250
4251 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4252 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4253
4254 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
4255 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4256 recompute_dominator (CDI_DOMINATORS, body_bb));
4257 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4258 recompute_dominator (CDI_DOMINATORS, fin_bb));
4259 }
4260
4261
4262 /* A subroutine of expand_omp_for. Generate code for a parallel
4263 loop with static schedule and a specified chunk size. Given
4264 parameters:
4265
4266 for (V = N1; V cond N2; V += STEP) BODY;
4267
4268 where COND is "<" or ">", we generate pseudocode
4269
4270 if (cond is <)
4271 adj = STEP - 1;
4272 else
4273 adj = STEP + 1;
4274 if ((__typeof (V)) -1 > 0 && cond is >)
4275 n = -(adj + N2 - N1) / -STEP;
4276 else
4277 n = (adj + N2 - N1) / STEP;
4278 trip = 0;
4279 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4280 here so that V is defined
4281 if the loop is not entered
4282 L0:
4283 s0 = (trip * nthreads + threadid) * CHUNK;
4284 e0 = min(s0 + CHUNK, n);
4285 if (s0 < n) goto L1; else goto L4;
4286 L1:
4287 V = s0 * STEP + N1;
4288 e = e0 * STEP + N1;
4289 L2:
4290 BODY;
4291 V += STEP;
4292 if (V cond e) goto L2; else goto L3;
4293 L3:
4294 trip += 1;
4295 goto L0;
4296 L4:
4297 */
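/* Illustrative sketch, not part of the pass: the chunked schedule above
   deals CHUNK-sized blocks of iterations out round-robin.  On trip TRIP
   (0, 1, 2, ...) thread THREADID gets the half-open block [*s0, *e0) of
   the logical iteration space and keeps incrementing TRIP until *s0
   reaches N, mirroring labels L0/L3/L4 above.  NTHREADS and THREADID
   stand in for the OpenMP runtime queries.  */

static void
static_chunk_bounds_example (long n, long chunk, long nthreads,
                             long threadid, long trip,
                             long *s0, long *e0)
{
  *s0 = (trip * nthreads + threadid) * chunk;
  *e0 = *s0 + chunk;
  if (*e0 > n)
    *e0 = n;
}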
4298
4299 static void
4300 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4301 {
4302 tree n, s0, e0, e, t;
4303 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4304 tree type, itype, v_main, v_back, v_extra;
4305 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4306 basic_block trip_update_bb, cont_bb, fin_bb;
4307 gimple_stmt_iterator si;
4308 gimple stmt;
4309 edge se;
4310
4311 itype = type = TREE_TYPE (fd->loop.v);
4312 if (POINTER_TYPE_P (type))
4313 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4314
4315 entry_bb = region->entry;
4316 se = split_block (entry_bb, last_stmt (entry_bb));
4317 entry_bb = se->src;
4318 iter_part_bb = se->dest;
4319 cont_bb = region->cont;
4320 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4321 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4322 == FALLTHRU_EDGE (cont_bb)->dest);
4323 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4324 body_bb = single_succ (seq_start_bb);
4325 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4326 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4327 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4328 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4329 exit_bb = region->exit;
4330
4331 /* Trip and adjustment setup goes in ENTRY_BB. */
4332 si = gsi_last_bb (entry_bb);
4333 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4334
4335 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4336 t = fold_convert (itype, t);
4337 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4338 true, GSI_SAME_STMT);
4339
4340 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4341 t = fold_convert (itype, t);
4342 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4343 true, GSI_SAME_STMT);
4344
4345 fd->loop.n1
4346 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4347 true, NULL_TREE, true, GSI_SAME_STMT);
4348 fd->loop.n2
4349 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4350 true, NULL_TREE, true, GSI_SAME_STMT);
4351 fd->loop.step
4352 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4353 true, NULL_TREE, true, GSI_SAME_STMT);
4354 fd->chunk_size
4355 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4356 true, NULL_TREE, true, GSI_SAME_STMT);
4357
4358 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4359 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4360 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4361 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4362 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4363 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4364 fold_build1 (NEGATE_EXPR, itype, t),
4365 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4366 else
4367 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4368 t = fold_convert (itype, t);
4369 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4370 true, GSI_SAME_STMT);
4371
4372 trip_var = create_tmp_var (itype, ".trip");
4373 if (gimple_in_ssa_p (cfun))
4374 {
4375 add_referenced_var (trip_var);
4376 trip_init = make_ssa_name (trip_var, NULL);
4377 trip_main = make_ssa_name (trip_var, NULL);
4378 trip_back = make_ssa_name (trip_var, NULL);
4379 }
4380 else
4381 {
4382 trip_init = trip_var;
4383 trip_main = trip_var;
4384 trip_back = trip_var;
4385 }
4386
4387 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4388 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4389
4390 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4391 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4392 if (POINTER_TYPE_P (type))
4393 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4394 fold_convert (sizetype, t));
4395 else
4396 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4397 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4398 true, GSI_SAME_STMT);
4399
4400 /* Remove the GIMPLE_OMP_FOR. */
4401 gsi_remove (&si, true);
4402
4403 /* Iteration space partitioning goes in ITER_PART_BB. */
4404 si = gsi_last_bb (iter_part_bb);
4405
4406 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4407 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4408 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4409 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4410 false, GSI_CONTINUE_LINKING);
4411
4412 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4413 t = fold_build2 (MIN_EXPR, itype, t, n);
4414 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4415 false, GSI_CONTINUE_LINKING);
4416
4417 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4418 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4419
4420 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4421 si = gsi_start_bb (seq_start_bb);
4422
4423 t = fold_convert (itype, s0);
4424 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4425 if (POINTER_TYPE_P (type))
4426 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4427 fold_convert (sizetype, t));
4428 else
4429 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4430 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4431 false, GSI_CONTINUE_LINKING);
4432 stmt = gimple_build_assign (fd->loop.v, t);
4433 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4434
4435 t = fold_convert (itype, e0);
4436 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4437 if (POINTER_TYPE_P (type))
4438 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4439 fold_convert (sizetype, t));
4440 else
4441 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4442 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4443 false, GSI_CONTINUE_LINKING);
4444
4445 /* The code controlling the sequential loop goes in CONT_BB,
4446 replacing the GIMPLE_OMP_CONTINUE. */
4447 si = gsi_last_bb (cont_bb);
4448 stmt = gsi_stmt (si);
4449 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4450 v_main = gimple_omp_continue_control_use (stmt);
4451 v_back = gimple_omp_continue_control_def (stmt);
4452
4453 if (POINTER_TYPE_P (type))
4454 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4455 fold_convert (sizetype, fd->loop.step));
4456 else
4457 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4458 stmt = gimple_build_assign (v_back, t);
4459 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4460
4461 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4462 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4463
4464 /* Remove GIMPLE_OMP_CONTINUE. */
4465 gsi_remove (&si, true);
4466
4467 /* Trip update code goes into TRIP_UPDATE_BB. */
4468 si = gsi_start_bb (trip_update_bb);
4469
4470 t = build_int_cst (itype, 1);
4471 t = build2 (PLUS_EXPR, itype, trip_main, t);
4472 stmt = gimple_build_assign (trip_back, t);
4473 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4474
4475 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4476 si = gsi_last_bb (exit_bb);
4477 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4478 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4479 false, GSI_SAME_STMT);
4480 gsi_remove (&si, true);
4481
4482 /* Connect the new blocks. */
4483 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4484 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4485
4486 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4487 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4488
4489 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4490
4491 if (gimple_in_ssa_p (cfun))
4492 {
4493 gimple_stmt_iterator psi;
4494 gimple phi;
4495 edge re, ene;
4496 edge_var_map_vector head;
4497 edge_var_map *vm;
4498 size_t i;
4499
4500 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4501 remove arguments of the phi nodes in fin_bb. We need to create
4502 appropriate phi nodes in iter_part_bb instead. */
4503 se = single_pred_edge (fin_bb);
4504 re = single_succ_edge (trip_update_bb);
4505 head = redirect_edge_var_map_vector (re);
4506 ene = single_succ_edge (entry_bb);
4507
4508 psi = gsi_start_phis (fin_bb);
4509 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4510 gsi_next (&psi), ++i)
4511 {
4512 gimple nphi;
4513 source_location locus;
4514
4515 phi = gsi_stmt (psi);
4516 t = gimple_phi_result (phi);
4517 gcc_assert (t == redirect_edge_var_map_result (vm));
4518 nphi = create_phi_node (t, iter_part_bb);
4519 SSA_NAME_DEF_STMT (t) = nphi;
4520
4521 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4522 locus = gimple_phi_arg_location_from_edge (phi, se);
4523
4524 /* A special case -- fd->loop.v is not yet computed in
4525 iter_part_bb; we need to use v_extra instead. */
4526 if (t == fd->loop.v)
4527 t = v_extra;
4528 add_phi_arg (nphi, t, ene, locus);
4529 locus = redirect_edge_var_map_location (vm);
4530 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4531 }
4532 gcc_assert (gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4533 redirect_edge_var_map_clear (re);
4534 while (1)
4535 {
4536 psi = gsi_start_phis (fin_bb);
4537 if (gsi_end_p (psi))
4538 break;
4539 remove_phi_node (&psi, false);
4540 }
4541
4542 /* Make phi node for trip. */
4543 phi = create_phi_node (trip_main, iter_part_bb);
4544 SSA_NAME_DEF_STMT (trip_main) = phi;
4545 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4546 UNKNOWN_LOCATION);
4547 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4548 UNKNOWN_LOCATION);
4549 }
4550
4551 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4552 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4553 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4554 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4555 recompute_dominator (CDI_DOMINATORS, fin_bb));
4556 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4557 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4558 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4559 recompute_dominator (CDI_DOMINATORS, body_bb));
4560 }
4561
4562
4563 /* Expand the OpenMP loop defined by REGION. */
4564
4565 static void
4566 expand_omp_for (struct omp_region *region)
4567 {
4568 struct omp_for_data fd;
4569 struct omp_for_data_loop *loops;
4570
4571 loops
4572 = (struct omp_for_data_loop *)
4573 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4574 * sizeof (struct omp_for_data_loop));
4575 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4576 region->sched_kind = fd.sched_kind;
4577
4578 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4579 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4580 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4581 if (region->cont)
4582 {
4583 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4584 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4585 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4586 }
4587
4588 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4589 && !fd.have_ordered
4590 && fd.collapse == 1
4591 && region->cont != NULL)
4592 {
4593 if (fd.chunk_size == NULL)
4594 expand_omp_for_static_nochunk (region, &fd);
4595 else
4596 expand_omp_for_static_chunk (region, &fd);
4597 }
4598 else
4599 {
4600 int fn_index, start_ix, next_ix;
4601
4602 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
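/* This indexing relies on the GOMP_loop_*_start/next builtins being
   declared so that the four schedule kinds (static, dynamic, guided,
   runtime) are consecutive, followed by their four ordered variants,
   and likewise within the _ull_ group used just below.  */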
4603 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4604 ? 3 : fd.sched_kind;
4605 fn_index += fd.have_ordered * 4;
4606 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4607 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4608 if (fd.iter_type == long_long_unsigned_type_node)
4609 {
4610 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4611 - BUILT_IN_GOMP_LOOP_STATIC_START;
4612 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4613 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4614 }
4615 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4616 (enum built_in_function) next_ix);
4617 }
4618
4619 update_ssa (TODO_update_ssa_only_virtuals);
4620 }
4621
4622
4623 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4624
4625 v = GOMP_sections_start (n);
4626 L0:
4627 switch (v)
4628 {
4629 case 0:
4630 goto L2;
4631 case 1:
4632 section 1;
4633 goto L1;
4634 case 2:
4635 ...
4636 case n:
4637 ...
4638 default:
4639 abort ();
4640 }
4641 L1:
4642 v = GOMP_sections_next ();
4643 goto L0;
4644 L2:
4645 reduction;
4646
4647 If this is a combined parallel sections, replace the call to
4648 GOMP_sections_start with call to GOMP_sections_next. */
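/* Illustrative sketch, not part of the pass: roughly the code generated
   for a two-section construct, written out by hand.  The libgomp
   prototypes are repeated here only for the example and are assumptions
   of the sketch, not declarations this file relies on.  */

extern unsigned GOMP_sections_start (unsigned);
extern unsigned GOMP_sections_next (void);
extern void GOMP_sections_end (void);

static void
sections_dispatch_example (void)
{
  unsigned v = GOMP_sections_start (2);   /* two sections */

  while (v != 0)
    {
      switch (v)
        {
        case 1:
          /* body of section 1 */
          break;
        case 2:
          /* body of section 2 */
          break;
        default:
          __builtin_trap ();              /* the DEFAULT_BB abort */
        }
      v = GOMP_sections_next ();
    }
  GOMP_sections_end ();
}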
4649
4650 static void
4651 expand_omp_sections (struct omp_region *region)
4652 {
4653 tree t, u, vin = NULL, vmain, vnext, l2;
4654 VEC (tree,heap) *label_vec;
4655 unsigned len;
4656 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4657 gimple_stmt_iterator si, switch_si;
4658 gimple sections_stmt, stmt, cont;
4659 edge_iterator ei;
4660 edge e;
4661 struct omp_region *inner;
4662 unsigned i, casei;
4663 bool exit_reachable = region->cont != NULL;
4664
4665 gcc_assert (exit_reachable == (region->exit != NULL));
4666 entry_bb = region->entry;
4667 l0_bb = single_succ (entry_bb);
4668 l1_bb = region->cont;
4669 l2_bb = region->exit;
4670 if (exit_reachable)
4671 {
4672 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4673 l2 = gimple_block_label (l2_bb);
4674 else
4675 {
4676 /* This can happen if there are reductions. */
4677 len = EDGE_COUNT (l0_bb->succs);
4678 gcc_assert (len > 0);
4679 e = EDGE_SUCC (l0_bb, len - 1);
4680 si = gsi_last_bb (e->dest);
4681 l2 = NULL_TREE;
4682 if (gsi_end_p (si)
4683 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4684 l2 = gimple_block_label (e->dest);
4685 else
4686 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4687 {
4688 si = gsi_last_bb (e->dest);
4689 if (gsi_end_p (si)
4690 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4691 {
4692 l2 = gimple_block_label (e->dest);
4693 break;
4694 }
4695 }
4696 }
4697 default_bb = create_empty_bb (l1_bb->prev_bb);
4698 }
4699 else
4700 {
4701 default_bb = create_empty_bb (l0_bb);
4702 l2 = gimple_block_label (default_bb);
4703 }
4704
4705 /* We will build a switch() with enough cases for all the
4706 GIMPLE_OMP_SECTION regions, a '0' case to signal that no more work remains,
4707 and a default case to abort if something goes wrong. */
4708 len = EDGE_COUNT (l0_bb->succs);
4709
4710 /* Use VEC_quick_push on label_vec throughout, since we know the size
4711 in advance. */
4712 label_vec = VEC_alloc (tree, heap, len);
4713
4714 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4715 GIMPLE_OMP_SECTIONS statement. */
4716 si = gsi_last_bb (entry_bb);
4717 sections_stmt = gsi_stmt (si);
4718 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4719 vin = gimple_omp_sections_control (sections_stmt);
4720 if (!is_combined_parallel (region))
4721 {
4722 /* If we are not inside a combined parallel+sections region,
4723 call GOMP_sections_start. */
4724 t = build_int_cst (unsigned_type_node,
4725 exit_reachable ? len - 1 : len);
4726 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4727 stmt = gimple_build_call (u, 1, t);
4728 }
4729 else
4730 {
4731 /* Otherwise, call GOMP_sections_next. */
4732 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4733 stmt = gimple_build_call (u, 0);
4734 }
4735 gimple_call_set_lhs (stmt, vin);
4736 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4737 gsi_remove (&si, true);
4738
4739 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4740 L0_BB. */
4741 switch_si = gsi_last_bb (l0_bb);
4742 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4743 if (exit_reachable)
4744 {
4745 cont = last_stmt (l1_bb);
4746 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4747 vmain = gimple_omp_continue_control_use (cont);
4748 vnext = gimple_omp_continue_control_def (cont);
4749 }
4750 else
4751 {
4752 vmain = vin;
4753 vnext = NULL_TREE;
4754 }
4755
4756 i = 0;
4757 if (exit_reachable)
4758 {
4759 t = build3 (CASE_LABEL_EXPR, void_type_node,
4760 build_int_cst (unsigned_type_node, 0), NULL, l2);
4761 VEC_quick_push (tree, label_vec, t);
4762 i++;
4763 }
4764
4765 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4766 for (inner = region->inner, casei = 1;
4767 inner;
4768 inner = inner->next, i++, casei++)
4769 {
4770 basic_block s_entry_bb, s_exit_bb;
4771
4772 /* Skip optional reduction region. */
4773 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4774 {
4775 --i;
4776 --casei;
4777 continue;
4778 }
4779
4780 s_entry_bb = inner->entry;
4781 s_exit_bb = inner->exit;
4782
4783 t = gimple_block_label (s_entry_bb);
4784 u = build_int_cst (unsigned_type_node, casei);
4785 u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
4786 VEC_quick_push (tree, label_vec, u);
4787
4788 si = gsi_last_bb (s_entry_bb);
4789 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4790 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4791 gsi_remove (&si, true);
4792 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4793
4794 if (s_exit_bb == NULL)
4795 continue;
4796
4797 si = gsi_last_bb (s_exit_bb);
4798 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4799 gsi_remove (&si, true);
4800
4801 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4802 }
4803
4804 /* Error handling code goes in DEFAULT_BB. */
4805 t = gimple_block_label (default_bb);
4806 u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
4807 make_edge (l0_bb, default_bb, 0);
4808
4809 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4810 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4811 gsi_remove (&switch_si, true);
4812 VEC_free (tree, heap, label_vec);
4813
4814 si = gsi_start_bb (default_bb);
4815 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4816 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4817
4818 if (exit_reachable)
4819 {
4820 /* Code to get the next section goes in L1_BB. */
4821 si = gsi_last_bb (l1_bb);
4822 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4823
4824 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4825 gimple_call_set_lhs (stmt, vnext);
4826 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4827 gsi_remove (&si, true);
4828
4829 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4830
4831 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4832 si = gsi_last_bb (l2_bb);
4833 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4834 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4835 else
4836 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4837 stmt = gimple_build_call (t, 0);
4838 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4839 gsi_remove (&si, true);
4840 }
4841
4842 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4843 }
4844
4845
4846 /* Expand code for an OpenMP single directive. We've already expanded
4847 much of the code; here we simply place the GOMP_barrier call. */
4848
4849 static void
4850 expand_omp_single (struct omp_region *region)
4851 {
4852 basic_block entry_bb, exit_bb;
4853 gimple_stmt_iterator si;
4854 bool need_barrier = false;
4855
4856 entry_bb = region->entry;
4857 exit_bb = region->exit;
4858
4859 si = gsi_last_bb (entry_bb);
4860 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4861 be removed. We need to ensure that the thread that entered the single
4862 does not exit before the data is copied out by the other threads. */
4863 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4864 OMP_CLAUSE_COPYPRIVATE))
4865 need_barrier = true;
4866 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4867 gsi_remove (&si, true);
4868 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4869
4870 si = gsi_last_bb (exit_bb);
4871 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4872 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4873 false, GSI_SAME_STMT);
4874 gsi_remove (&si, true);
4875 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4876 }
4877
4878
4879 /* Generic expansion for OpenMP synchronization directives: master,
4880 ordered and critical. All we need to do here is remove the entry
4881 and exit markers for REGION. */
4882
4883 static void
4884 expand_omp_synch (struct omp_region *region)
4885 {
4886 basic_block entry_bb, exit_bb;
4887 gimple_stmt_iterator si;
4888
4889 entry_bb = region->entry;
4890 exit_bb = region->exit;
4891
4892 si = gsi_last_bb (entry_bb);
4893 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4894 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4895 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4896 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4897 gsi_remove (&si, true);
4898 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4899
4900 if (exit_bb)
4901 {
4902 si = gsi_last_bb (exit_bb);
4903 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4904 gsi_remove (&si, true);
4905 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4906 }
4907 }
4908
4909 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4910 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4911 size of the data type, and thus usable to find the index of the builtin
4912 decl. Returns false if the expression is not of the proper form. */
4913
4914 static bool
4915 expand_omp_atomic_fetch_op (basic_block load_bb,
4916 tree addr, tree loaded_val,
4917 tree stored_val, int index)
4918 {
4919 enum built_in_function base;
4920 tree decl, itype, call;
4921 enum insn_code *optab;
4922 tree rhs;
4923 basic_block store_bb = single_succ (load_bb);
4924 gimple_stmt_iterator gsi;
4925 gimple stmt;
4926 location_t loc;
4927
4928 /* We expect to find the following sequences:
4929
4930 load_bb:
4931 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4932
4933 store_bb:
4934 val = tmp OP something; (or: something OP tmp)
4935 GIMPLE_OMP_STORE (val)
4936
4937 ???FIXME: Allow a more flexible sequence.
4938 Perhaps use data flow to pick the statements.
4939
4940 */
4941
4942 gsi = gsi_after_labels (store_bb);
4943 stmt = gsi_stmt (gsi);
4944 loc = gimple_location (stmt);
4945 if (!is_gimple_assign (stmt))
4946 return false;
4947 gsi_next (&gsi);
4948 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
4949 return false;
4950
4951 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
4952 return false;
4953
4954 /* Check for one of the supported fetch-op operations. */
4955 switch (gimple_assign_rhs_code (stmt))
4956 {
4957 case PLUS_EXPR:
4958 case POINTER_PLUS_EXPR:
4959 base = BUILT_IN_FETCH_AND_ADD_N;
4960 optab = sync_add_optab;
4961 break;
4962 case MINUS_EXPR:
4963 base = BUILT_IN_FETCH_AND_SUB_N;
4964 optab = sync_add_optab;
4965 break;
4966 case BIT_AND_EXPR:
4967 base = BUILT_IN_FETCH_AND_AND_N;
4968 optab = sync_and_optab;
4969 break;
4970 case BIT_IOR_EXPR:
4971 base = BUILT_IN_FETCH_AND_OR_N;
4972 optab = sync_ior_optab;
4973 break;
4974 case BIT_XOR_EXPR:
4975 base = BUILT_IN_FETCH_AND_XOR_N;
4976 optab = sync_xor_optab;
4977 break;
4978 default:
4979 return false;
4980 }
4981 /* Make sure the expression is of the proper form. */
4982 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
4983 rhs = gimple_assign_rhs2 (stmt);
4984 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
4985 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
4986 rhs = gimple_assign_rhs1 (stmt);
4987 else
4988 return false;
4989
4990 decl = built_in_decls[base + index + 1];
4991 itype = TREE_TYPE (TREE_TYPE (decl));
4992
4993 if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
4994 return false;
4995
4996 gsi = gsi_last_bb (load_bb);
4997 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
4998 call = build_call_expr_loc (loc,
4999 decl, 2, addr,
5000 fold_convert_loc (loc, itype, rhs));
5001 call = fold_convert_loc (loc, void_type_node, call);
5002 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5003 gsi_remove (&gsi, true);
5004
5005 gsi = gsi_last_bb (store_bb);
5006 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5007 gsi_remove (&gsi, true);
5008 gsi = gsi_last_bb (store_bb);
5009 gsi_remove (&gsi, true);
5010
5011 if (gimple_in_ssa_p (cfun))
5012 update_ssa (TODO_update_ssa_no_phi);
5013
5014 return true;
5015 }
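/* Illustrative sketch, not part of the pass: for a statement pair of the
   shape expand_omp_atomic_fetch_op looks for, e.g. the one produced by
   "#pragma omp atomic  counter += inc;", the load/store pair collapses
   into a single call to the appropriately sized __sync_fetch_and_*
   builtin, written out here by hand for a long:  */

static void
atomic_fetch_add_example (long *counter, long inc)
{
  (void) __sync_fetch_and_add (counter, inc);
}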
5016
5017 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5018
5019 oldval = *addr;
5020 repeat:
5021 newval = rhs; // with oldval replacing *addr in rhs
5022 actual = __sync_val_compare_and_swap (addr, oldval, newval);
5023 if (actual != oldval)
5024 { oldval = actual; goto repeat; }
5025
5026 INDEX is log2 of the size of the data type, and thus usable to find the
5027 index of the builtin decl. */
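/* Illustrative sketch, not part of the pass: the loop above written out
   by hand for a 4-byte float.  The value is view-converted to a
   same-sized integer, and the retry test compares the value returned by
   the compare-and-swap with the value we expected to find, as the GIMPLE
   built below does.  The pointer cast is only for the sake of the
   example; the pass builds the access through an integer-typed pointer
   (IADDR) to begin with.  */

static void
atomic_cas_loop_example (float *addr, float rhs)
{
  unsigned int *iaddr = (unsigned int *) addr;
  union { float f; unsigned int i; } oldv, newv;
  unsigned int prev = *iaddr;

  do
    {
      oldv.i = prev;
      newv.f = oldv.f + rhs;    /* the statement guarded by the atomic */
      prev = __sync_val_compare_and_swap (iaddr, oldv.i, newv.i);
    }
  while (prev != oldv.i);
}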
5028
5029 static bool
5030 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5031 tree addr, tree loaded_val, tree stored_val,
5032 int index)
5033 {
5034 tree loadedi, storedi, initial, new_storedi, old_vali;
5035 tree type, itype, cmpxchg, iaddr;
5036 gimple_stmt_iterator si;
5037 basic_block loop_header = single_succ (load_bb);
5038 gimple phi, stmt;
5039 edge e;
5040
5041 cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
5042 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5043 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5044
5045 if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
5046 return false;
5047
5048 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5049 si = gsi_last_bb (load_bb);
5050 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5051
5052 /* For floating-point values, we'll need to view-convert them to integers
5053 so that we can perform the atomic compare and swap. Simplify the
5054 following code by always setting up the "i"ntegral variables. */
5055 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5056 {
5057 tree iaddr_val;
5058
5059 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5060 true), NULL);
5061 iaddr_val
5062 = force_gimple_operand_gsi (&si,
5063 fold_convert (TREE_TYPE (iaddr), addr),
5064 false, NULL_TREE, true, GSI_SAME_STMT);
5065 stmt = gimple_build_assign (iaddr, iaddr_val);
5066 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5067 loadedi = create_tmp_var (itype, NULL);
5068 if (gimple_in_ssa_p (cfun))
5069 {
5070 add_referenced_var (iaddr);
5071 add_referenced_var (loadedi);
5072 loadedi = make_ssa_name (loadedi, NULL);
5073 }
5074 }
5075 else
5076 {
5077 iaddr = addr;
5078 loadedi = loaded_val;
5079 }
5080
5081 initial
5082 = force_gimple_operand_gsi (&si,
5083 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5084 iaddr,
5085 build_int_cst (TREE_TYPE (iaddr), 0)),
5086 true, NULL_TREE, true, GSI_SAME_STMT);
5087
5088 /* Move the value to the LOADEDI temporary. */
5089 if (gimple_in_ssa_p (cfun))
5090 {
5091 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5092 phi = create_phi_node (loadedi, loop_header);
5093 SSA_NAME_DEF_STMT (loadedi) = phi;
5094 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5095 initial);
5096 }
5097 else
5098 gsi_insert_before (&si,
5099 gimple_build_assign (loadedi, initial),
5100 GSI_SAME_STMT);
5101 if (loadedi != loaded_val)
5102 {
5103 gimple_stmt_iterator gsi2;
5104 tree x;
5105
5106 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5107 gsi2 = gsi_start_bb (loop_header);
5108 if (gimple_in_ssa_p (cfun))
5109 {
5110 gimple stmt;
5111 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5112 true, GSI_SAME_STMT);
5113 stmt = gimple_build_assign (loaded_val, x);
5114 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5115 }
5116 else
5117 {
5118 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5119 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5120 true, GSI_SAME_STMT);
5121 }
5122 }
5123 gsi_remove (&si, true);
5124
5125 si = gsi_last_bb (store_bb);
5126 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5127
5128 if (iaddr == addr)
5129 storedi = stored_val;
5130 else
5131 storedi =
5132 force_gimple_operand_gsi (&si,
5133 build1 (VIEW_CONVERT_EXPR, itype,
5134 stored_val), true, NULL_TREE, true,
5135 GSI_SAME_STMT);
5136
5137 /* Build the compare&swap statement. */
5138 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5139 new_storedi = force_gimple_operand_gsi (&si,
5140 fold_convert (TREE_TYPE (loadedi),
5141 new_storedi),
5142 true, NULL_TREE,
5143 true, GSI_SAME_STMT);
5144
5145 if (gimple_in_ssa_p (cfun))
5146 old_vali = loadedi;
5147 else
5148 {
5149 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5150 if (gimple_in_ssa_p (cfun))
5151 add_referenced_var (old_vali);
5152 stmt = gimple_build_assign (old_vali, loadedi);
5153 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5154
5155 stmt = gimple_build_assign (loadedi, new_storedi);
5156 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5157 }
5158
5159 /* We always perform the comparison as an integer, even for floating
5160 point: a float compare would spin forever on NaN (NaN != NaN) and could
5161 exit early when 0.0 and -0.0 compare equal despite distinct bits. */
5162 stmt = gimple_build_cond_empty
5163 (build2 (NE_EXPR, boolean_type_node,
5164 new_storedi, old_vali));
5165 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5166
5167 /* Update cfg. */
5168 e = single_succ_edge (store_bb);
5169 e->flags &= ~EDGE_FALLTHRU;
5170 e->flags |= EDGE_FALSE_VALUE;
5171
5172 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5173
5174 /* Copy the new value to loadedi (we already did that before the condition
5175 if we are not in SSA). */
5176 if (gimple_in_ssa_p (cfun))
5177 {
5178 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5179 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5180 }
5181
5182 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5183 gsi_remove (&si, true);
5184
5185 if (gimple_in_ssa_p (cfun))
5186 update_ssa (TODO_update_ssa_no_phi);
5187
5188 return true;
5189 }
5190
5191 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5192
5193 GOMP_atomic_start ();
5194 *addr = rhs;
5195 GOMP_atomic_end ();
5196
5197 The result is not globally atomic, but works so long as all parallel
5198 references are within #pragma omp atomic directives. According to
5199 responses received from omp@openmp.org, this appears to be within the
5200 spec, which makes sense since that's how several other compilers
5201 handle this situation as well.
5202 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5203 expanding. STORED_VAL is the operand of the matching
5204 GIMPLE_OMP_ATOMIC_STORE.
5205
5206 We replace
5207 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5208 loaded_val = *addr;
5209
5210 and replace
5211 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5212 *addr = stored_val;
5213 */
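/* Illustrative sketch, not part of the pass: the fallback above written
   out by hand for "#pragma omp atomic  *addr *= y;" on a long double, a
   type for which no __sync builtin exists.  The GOMP_atomic_start/end
   prototypes are repeated here only for the example.  */

extern void GOMP_atomic_start (void);
extern void GOMP_atomic_end (void);

static void
atomic_mutex_example (long double *addr, long double y)
{
  long double loaded_val;

  GOMP_atomic_start ();
  loaded_val = *addr;           /* replaces GIMPLE_OMP_ATOMIC_LOAD */
  *addr = loaded_val * y;       /* replaces GIMPLE_OMP_ATOMIC_STORE */
  GOMP_atomic_end ();
}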
5214
5215 static bool
5216 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5217 tree addr, tree loaded_val, tree stored_val)
5218 {
5219 gimple_stmt_iterator si;
5220 gimple stmt;
5221 tree t;
5222
5223 si = gsi_last_bb (load_bb);
5224 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5225
5226 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5227 t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
5228 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5229
5230 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5231 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5232 gsi_remove (&si, true);
5233
5234 si = gsi_last_bb (store_bb);
5235 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5236
5237 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5238 stored_val);
5239 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5240
5241 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5242 t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
5243 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5244 gsi_remove (&si, true);
5245
5246 if (gimple_in_ssa_p (cfun))
5247 update_ssa (TODO_update_ssa_no_phi);
5248 return true;
5249 }
5250
5251 /* Expand a GIMPLE_OMP_ATOMIC statement. We first try to expand it
5252 using expand_omp_atomic_fetch_op. If that fails, we try
5253 expand_omp_atomic_pipeline, and if that fails too, the
5254 ultimate fallback is wrapping the operation in a mutex
5255 (expand_omp_atomic_mutex). REGION is the atomic region built
5256 by build_omp_regions_1(). */
5257
5258 static void
5259 expand_omp_atomic (struct omp_region *region)
5260 {
5261 basic_block load_bb = region->entry, store_bb = region->exit;
5262 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5263 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5264 tree addr = gimple_omp_atomic_load_rhs (load);
5265 tree stored_val = gimple_omp_atomic_store_val (store);
5266 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5267 HOST_WIDE_INT index;
5268
5269 /* Make sure the type is one of the supported sizes. */
5270 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5271 index = exact_log2 (index);
5272 if (index >= 0 && index <= 4)
5273 {
5274 unsigned int align = TYPE_ALIGN_UNIT (type);
5275
5276 /* __sync builtins require strict data alignment. */
5277 if (exact_log2 (align) >= index)
5278 {
5279 /* When possible, use specialized atomic update functions. */
5280 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5281 && store_bb == single_succ (load_bb))
5282 {
5283 if (expand_omp_atomic_fetch_op (load_bb, addr,
5284 loaded_val, stored_val, index))
5285 return;
5286 }
5287
5288 /* If we don't have specialized __sync builtins, try and implement
5289 as a compare and swap loop. */
5290 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5291 loaded_val, stored_val, index))
5292 return;
5293 }
5294 }
5295
5296 /* The ultimate fallback is wrapping the operation in a mutex. */
5297 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5298 }
5299
5300
5301 /* Expand the parallel region tree rooted at REGION. Expansion
5302 proceeds in depth-first order. Innermost regions are expanded
5303 first. This way, parallel regions that require a new function to
5304 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5305 internal dependencies in their body. */
5306
5307 static void
5308 expand_omp (struct omp_region *region)
5309 {
5310 while (region)
5311 {
5312 location_t saved_location;
5313
5314 /* First, determine whether this is a combined parallel+workshare
5315 region. */
5316 if (region->type == GIMPLE_OMP_PARALLEL)
5317 determine_parallel_type (region);
5318
5319 if (region->inner)
5320 expand_omp (region->inner);
5321
5322 saved_location = input_location;
5323 if (gimple_has_location (last_stmt (region->entry)))
5324 input_location = gimple_location (last_stmt (region->entry));
5325
5326 switch (region->type)
5327 {
5328 case GIMPLE_OMP_PARALLEL:
5329 case GIMPLE_OMP_TASK:
5330 expand_omp_taskreg (region);
5331 break;
5332
5333 case GIMPLE_OMP_FOR:
5334 expand_omp_for (region);
5335 break;
5336
5337 case GIMPLE_OMP_SECTIONS:
5338 expand_omp_sections (region);
5339 break;
5340
5341 case GIMPLE_OMP_SECTION:
5342 /* Individual omp sections are handled together with their
5343 parent GIMPLE_OMP_SECTIONS region. */
5344 break;
5345
5346 case GIMPLE_OMP_SINGLE:
5347 expand_omp_single (region);
5348 break;
5349
5350 case GIMPLE_OMP_MASTER:
5351 case GIMPLE_OMP_ORDERED:
5352 case GIMPLE_OMP_CRITICAL:
5353 expand_omp_synch (region);
5354 break;
5355
5356 case GIMPLE_OMP_ATOMIC_LOAD:
5357 expand_omp_atomic (region);
5358 break;
5359
5360 default:
5361 gcc_unreachable ();
5362 }
5363
5364 input_location = saved_location;
5365 region = region->next;
5366 }
5367 }
5368
5369
5370 /* Helper for build_omp_regions. Scan the dominator tree starting at
5371 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5372 true, the function ends once a single tree is built (otherwise, whole
5373 forest of OMP constructs may be built). */
5374
5375 static void
5376 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5377 bool single_tree)
5378 {
5379 gimple_stmt_iterator gsi;
5380 gimple stmt;
5381 basic_block son;
5382
5383 gsi = gsi_last_bb (bb);
5384 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5385 {
5386 struct omp_region *region;
5387 enum gimple_code code;
5388
5389 stmt = gsi_stmt (gsi);
5390 code = gimple_code (stmt);
5391 if (code == GIMPLE_OMP_RETURN)
5392 {
5393 /* STMT is the return point out of region PARENT. Mark it
5394 as the exit point and make PARENT the immediately
5395 enclosing region. */
5396 gcc_assert (parent);
5397 region = parent;
5398 region->exit = bb;
5399 parent = parent->outer;
5400 }
5401 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5402 {
5403 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5404 GIMPLE_OMP_RETURN, but matches with
5405 GIMPLE_OMP_ATOMIC_LOAD. */
5406 gcc_assert (parent);
5407 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5408 region = parent;
5409 region->exit = bb;
5410 parent = parent->outer;
5411 }
5412
5413 else if (code == GIMPLE_OMP_CONTINUE)
5414 {
5415 gcc_assert (parent);
5416 parent->cont = bb;
5417 }
5418 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5419 {
5420 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5421 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5422 ;
5423 }
5424 else
5425 {
5426 /* Otherwise, this directive becomes the parent for a new
5427 region. */
5428 region = new_omp_region (bb, code, parent);
5429 parent = region;
5430 }
5431 }
5432
5433 if (single_tree && !parent)
5434 return;
5435
5436 for (son = first_dom_son (CDI_DOMINATORS, bb);
5437 son;
5438 son = next_dom_son (CDI_DOMINATORS, son))
5439 build_omp_regions_1 (son, parent, single_tree);
5440 }
5441
5442 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5443 root_omp_region. */
5444
5445 static void
5446 build_omp_regions_root (basic_block root)
5447 {
5448 gcc_assert (root_omp_region == NULL);
5449 build_omp_regions_1 (root, NULL, true);
5450 gcc_assert (root_omp_region != NULL);
5451 }
5452
5453 /* Expand the OMP construct (and its subconstructs) starting in HEAD. */
5454
5455 void
5456 omp_expand_local (basic_block head)
5457 {
5458 build_omp_regions_root (head);
5459 if (dump_file && (dump_flags & TDF_DETAILS))
5460 {
5461 fprintf (dump_file, "\nOMP region tree\n\n");
5462 dump_omp_region (dump_file, root_omp_region, 0);
5463 fprintf (dump_file, "\n");
5464 }
5465
5466 remove_exit_barriers (root_omp_region);
5467 expand_omp (root_omp_region);
5468
5469 free_omp_regions ();
5470 }
5471
5472 /* Scan the CFG and build a tree of OMP regions, storing the root of
5473 the OMP region tree in root_omp_region. */
5474
5475 static void
5476 build_omp_regions (void)
5477 {
5478 gcc_assert (root_omp_region == NULL);
5479 calculate_dominance_info (CDI_DOMINATORS);
5480 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5481 }
5482
5483 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5484
5485 static unsigned int
5486 execute_expand_omp (void)
5487 {
5488 build_omp_regions ();
5489
5490 if (!root_omp_region)
5491 return 0;
5492
5493 if (dump_file)
5494 {
5495 fprintf (dump_file, "\nOMP region tree\n\n");
5496 dump_omp_region (dump_file, root_omp_region, 0);
5497 fprintf (dump_file, "\n");
5498 }
5499
5500 remove_exit_barriers (root_omp_region);
5501
5502 expand_omp (root_omp_region);
5503
5504 cleanup_tree_cfg ();
5505
5506 free_omp_regions ();
5507
5508 return 0;
5509 }
5510
5511 /* OMP expansion -- the default pass, run before creation of SSA form. */
5512
5513 static bool
5514 gate_expand_omp (void)
5515 {
5516 return (flag_openmp != 0 && !seen_error ());
5517 }
5518
5519 struct gimple_opt_pass pass_expand_omp =
5520 {
5521 {
5522 GIMPLE_PASS,
5523 "ompexp", /* name */
5524 gate_expand_omp, /* gate */
5525 execute_expand_omp, /* execute */
5526 NULL, /* sub */
5527 NULL, /* next */
5528 0, /* static_pass_number */
5529 TV_NONE, /* tv_id */
5530 PROP_gimple_any, /* properties_required */
5531 0, /* properties_provided */
5532 0, /* properties_destroyed */
5533 0, /* todo_flags_start */
5534 TODO_dump_func /* todo_flags_finish */
5535 }
5536 };
5537 \f
5538 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5539
5540 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5541 CTX is the enclosing OMP context for the current statement. */
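/* As a rough sketch (illustrative only, not a literal dump of the tuples),
   the sequence assembled below has the shape:

     <ilist: setup emitted for the data-sharing clauses>
     GIMPLE_OMP_SECTIONS <clauses, control variable .section>
     GIMPLE_OMP_SECTIONS_SWITCH
     GIMPLE_BIND
       GIMPLE_OMP_SECTION  body-1                 GIMPLE_OMP_RETURN
       ...
       GIMPLE_OMP_SECTION  body-N  <lastprivate>  GIMPLE_OMP_RETURN
     GIMPLE_OMP_CONTINUE (.section, .section)
     <olist: reductions>  <dlist: destructors>
     GIMPLE_OMP_RETURN <nowait if present>  */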
5542
5543 static void
5544 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5545 {
5546 tree block, control;
5547 gimple_stmt_iterator tgsi;
5548 unsigned i, len;
5549 gimple stmt, new_stmt, bind, t;
5550 gimple_seq ilist, dlist, olist, new_body, body;
5551 struct gimplify_ctx gctx;
5552
5553 stmt = gsi_stmt (*gsi_p);
5554
5555 push_gimplify_context (&gctx);
5556
5557 dlist = NULL;
5558 ilist = NULL;
5559 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5560 &ilist, &dlist, ctx);
5561
5562 tgsi = gsi_start (gimple_omp_body (stmt));
5563 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5564 continue;
5565
5566 tgsi = gsi_start (gimple_omp_body (stmt));
5567 body = NULL;
5568 for (i = 0; i < len; i++, gsi_next (&tgsi))
5569 {
5570 omp_context *sctx;
5571 gimple sec_start;
5572
5573 sec_start = gsi_stmt (tgsi);
5574 sctx = maybe_lookup_ctx (sec_start);
5575 gcc_assert (sctx);
5576
5577 gimple_seq_add_stmt (&body, sec_start);
5578
5579 lower_omp (gimple_omp_body (sec_start), sctx);
5580 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5581 gimple_omp_set_body (sec_start, NULL);
5582
5583 if (i == len - 1)
5584 {
5585 gimple_seq l = NULL;
5586 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5587 &l, ctx);
5588 gimple_seq_add_seq (&body, l);
5589 gimple_omp_section_set_last (sec_start);
5590 }
5591
5592 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5593 }
5594
5595 block = make_node (BLOCK);
5596 bind = gimple_build_bind (NULL, body, block);
5597
5598 olist = NULL;
5599 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5600
5601 block = make_node (BLOCK);
5602 new_stmt = gimple_build_bind (NULL, NULL, block);
5603
5604 pop_gimplify_context (new_stmt);
5605 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5606 BLOCK_VARS (block) = gimple_bind_vars (bind);
5607 if (BLOCK_VARS (block))
5608 TREE_USED (block) = 1;
5609
5610 new_body = NULL;
5611 gimple_seq_add_seq (&new_body, ilist);
5612 gimple_seq_add_stmt (&new_body, stmt);
5613 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5614 gimple_seq_add_stmt (&new_body, bind);
5615
5616 control = create_tmp_var (unsigned_type_node, ".section");
5617 t = gimple_build_omp_continue (control, control);
5618 gimple_omp_sections_set_control (stmt, control);
5619 gimple_seq_add_stmt (&new_body, t);
5620
5621 gimple_seq_add_seq (&new_body, olist);
5622 gimple_seq_add_seq (&new_body, dlist);
5623
5624 new_body = maybe_catch_exception (new_body);
5625
5626 t = gimple_build_omp_return
5627 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5628 OMP_CLAUSE_NOWAIT));
5629 gimple_seq_add_stmt (&new_body, t);
5630
5631 gimple_bind_set_body (new_stmt, new_body);
5632 gimple_omp_set_body (stmt, NULL);
5633
5634 gsi_replace (gsi_p, new_stmt, true);
5635 }
5636
5637
5638 /* A subroutine of lower_omp_single. Expand the simple form of
5639 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5640
5641 if (GOMP_single_start ())
5642 BODY;
5643 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5644
5645 FIXME. It may be better to delay expanding the logic of this until
5646 pass_expand_omp. The expanded logic may make the job more difficult
5647 for a synchronization analysis pass. */
5648
5649 static void
5650 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5651 {
5652 location_t loc = gimple_location (single_stmt);
5653 tree tlabel = create_artificial_label (loc);
5654 tree flabel = create_artificial_label (loc);
5655 gimple call, cond;
5656 tree lhs, decl;
5657
5658 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5659 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5660 call = gimple_build_call (decl, 0);
5661 gimple_call_set_lhs (call, lhs);
5662 gimple_seq_add_stmt (pre_p, call);
5663
5664 cond = gimple_build_cond (EQ_EXPR, lhs,
5665 fold_convert_loc (loc, TREE_TYPE (lhs),
5666 boolean_true_node),
5667 tlabel, flabel);
5668 gimple_seq_add_stmt (pre_p, cond);
5669 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5670 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5671 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5672 }
5673
5674
5675 /* A subroutine of lower_omp_single. Expand the form of
5676 a GIMPLE_OMP_SINGLE that has a copyprivate clause:
5677
5678 #pragma omp single copyprivate (a, b, c)
5679
5680 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5681
5682 {
5683 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5684 {
5685 BODY;
5686 copyout.a = a;
5687 copyout.b = b;
5688 copyout.c = c;
5689 GOMP_single_copy_end (&copyout);
5690 }
5691 else
5692 {
5693 a = copyout_p->a;
5694 b = copyout_p->b;
5695 c = copyout_p->c;
5696 }
5697 GOMP_barrier ();
5698 }
5699
5700 FIXME. It may be better to delay expanding the logic of this until
5701 pass_expand_omp. The expanded logic may make the job more difficult
5702 for a synchronization analysis pass. */
5703
5704 static void
5705 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5706 {
5707 tree ptr_type, t, l0, l1, l2;
5708 gimple_seq copyin_seq;
5709 location_t loc = gimple_location (single_stmt);
5710
5711 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5712
5713 ptr_type = build_pointer_type (ctx->record_type);
5714 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5715
5716 l0 = create_artificial_label (loc);
5717 l1 = create_artificial_label (loc);
5718 l2 = create_artificial_label (loc);
5719
5720 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5721 t = fold_convert_loc (loc, ptr_type, t);
5722 gimplify_assign (ctx->receiver_decl, t, pre_p);
5723
5724 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5725 build_int_cst (ptr_type, 0));
5726 t = build3 (COND_EXPR, void_type_node, t,
5727 build_and_jump (&l0), build_and_jump (&l1));
5728 gimplify_and_add (t, pre_p);
5729
5730 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5731
5732 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5733
5734 copyin_seq = NULL;
5735 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5736 &copyin_seq, ctx);
5737
5738 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5739 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
5740 1, t);
5741 gimplify_and_add (t, pre_p);
5742
5743 t = build_and_jump (&l2);
5744 gimplify_and_add (t, pre_p);
5745
5746 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5747
5748 gimple_seq_add_seq (pre_p, copyin_seq);
5749
5750 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5751 }
5752
5753
5754 /* Expand code for an OpenMP single directive. */
5755
5756 static void
5757 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5758 {
5759 tree block;
5760 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5761 gimple_seq bind_body, dlist;
5762 struct gimplify_ctx gctx;
5763
5764 push_gimplify_context (&gctx);
5765
5766 bind_body = NULL;
5767 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5768 &bind_body, &dlist, ctx);
5769 lower_omp (gimple_omp_body (single_stmt), ctx);
5770
5771 gimple_seq_add_stmt (&bind_body, single_stmt);
5772
5773 if (ctx->record_type)
5774 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5775 else
5776 lower_omp_single_simple (single_stmt, &bind_body);
5777
5778 gimple_omp_set_body (single_stmt, NULL);
5779
5780 gimple_seq_add_seq (&bind_body, dlist);
5781
5782 bind_body = maybe_catch_exception (bind_body);
5783
5784 t = gimple_build_omp_return
5785 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5786 OMP_CLAUSE_NOWAIT));
5787 gimple_seq_add_stmt (&bind_body, t);
5788
5789 block = make_node (BLOCK);
5790 bind = gimple_build_bind (NULL, bind_body, block);
5791
5792 pop_gimplify_context (bind);
5793
5794 gimple_bind_append_vars (bind, ctx->block_vars);
5795 BLOCK_VARS (block) = ctx->block_vars;
5796 gsi_replace (gsi_p, bind, true);
5797 if (BLOCK_VARS (block))
5798 TREE_USED (block) = 1;
5799 }
5800
5801
5802 /* Expand code for an OpenMP master directive. */
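/* Roughly, in the style of the expansion comments above (an illustrative
   sketch, not a literal dump), the lowering below produces:

     if (omp_get_thread_num () != 0)
       goto <skip>;
     BODY;
   <skip>:
     GIMPLE_OMP_RETURN <nowait>

   so only the master thread executes BODY and no barrier is implied.  */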
5803
5804 static void
5805 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5806 {
5807 tree block, lab = NULL, x;
5808 gimple stmt = gsi_stmt (*gsi_p), bind;
5809 location_t loc = gimple_location (stmt);
5810 gimple_seq tseq;
5811 struct gimplify_ctx gctx;
5812
5813 push_gimplify_context (&gctx);
5814
5815 block = make_node (BLOCK);
5816 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5817 block);
5818
5819 x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5820 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5821 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5822 tseq = NULL;
5823 gimplify_and_add (x, &tseq);
5824 gimple_bind_add_seq (bind, tseq);
5825
5826 lower_omp (gimple_omp_body (stmt), ctx);
5827 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5828 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5829 gimple_omp_set_body (stmt, NULL);
5830
5831 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5832
5833 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5834
5835 pop_gimplify_context (bind);
5836
5837 gimple_bind_append_vars (bind, ctx->block_vars);
5838 BLOCK_VARS (block) = ctx->block_vars;
5839 gsi_replace (gsi_p, bind, true);
5840 }
5841
5842
5843 /* Expand code for an OpenMP ordered directive. */
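/* Roughly (an illustrative sketch), the lowering below wraps the body in
   the runtime entry points named by the built-ins used here:

     GOMP_ordered_start ();
     BODY;
     GOMP_ordered_end ();
     GIMPLE_OMP_RETURN <nowait>  */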
5844
5845 static void
5846 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5847 {
5848 tree block;
5849 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5850 struct gimplify_ctx gctx;
5851
5852 push_gimplify_context (&gctx);
5853
5854 block = make_node (BLOCK);
5855 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5856 block);
5857
5858 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5859 gimple_bind_add_stmt (bind, x);
5860
5861 lower_omp (gimple_omp_body (stmt), ctx);
5862 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5863 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5864 gimple_omp_set_body (stmt, NULL);
5865
5866 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5867 gimple_bind_add_stmt (bind, x);
5868
5869 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5870
5871 pop_gimplify_context (bind);
5872
5873 gimple_bind_append_vars (bind, ctx->block_vars);
5874 BLOCK_VARS (block) = gimple_bind_vars (bind);
5875 gsi_replace (gsi_p, bind, true);
5876 }
5877
5878
5879 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
5880 substitution of a couple of function calls. But the NAMED case
5881 requires that languages coordinate on a symbol name. It is therefore
5882 best put here in common code. */
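/* Roughly (an illustrative sketch), the lowering below produces, for the
   unnamed case:

     GOMP_critical_start ();  BODY;  GOMP_critical_end ();

   and for "#pragma omp critical (foo)":

     GOMP_critical_name_start (&.gomp_critical_user_foo);
     BODY;
     GOMP_critical_name_end (&.gomp_critical_user_foo);

   where .gomp_critical_user_foo is the public common variable created
   below, so every translation unit using the same name shares one lock.  */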
5883
5884 static GTY((param1_is (tree), param2_is (tree)))
5885 splay_tree critical_name_mutexes;
5886
5887 static void
5888 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5889 {
5890 tree block;
5891 tree name, lock, unlock;
5892 gimple stmt = gsi_stmt (*gsi_p), bind;
5893 location_t loc = gimple_location (stmt);
5894 gimple_seq tbody;
5895 struct gimplify_ctx gctx;
5896
5897 name = gimple_omp_critical_name (stmt);
5898 if (name)
5899 {
5900 tree decl;
5901 splay_tree_node n;
5902
5903 if (!critical_name_mutexes)
5904 critical_name_mutexes
5905 = splay_tree_new_ggc (splay_tree_compare_pointers,
5906 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
5907 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
5908
5909 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5910 if (n == NULL)
5911 {
5912 char *new_str;
5913
5914 decl = create_tmp_var_raw (ptr_type_node, NULL);
5915
5916 new_str = ACONCAT ((".gomp_critical_user_",
5917 IDENTIFIER_POINTER (name), NULL));
5918 DECL_NAME (decl) = get_identifier (new_str);
5919 TREE_PUBLIC (decl) = 1;
5920 TREE_STATIC (decl) = 1;
5921 DECL_COMMON (decl) = 1;
5922 DECL_ARTIFICIAL (decl) = 1;
5923 DECL_IGNORED_P (decl) = 1;
5924 varpool_finalize_decl (decl);
5925
5926 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5927 (splay_tree_value) decl);
5928 }
5929 else
5930 decl = (tree) n->value;
5931
5932 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5933 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
5934
5935 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5936 unlock = build_call_expr_loc (loc, unlock, 1,
5937 build_fold_addr_expr_loc (loc, decl));
5938 }
5939 else
5940 {
5941 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5942 lock = build_call_expr_loc (loc, lock, 0);
5943
5944 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
5945 unlock = build_call_expr_loc (loc, unlock, 0);
5946 }
5947
5948 push_gimplify_context (&gctx);
5949
5950 block = make_node (BLOCK);
5951 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
5952
5953 tbody = gimple_bind_body (bind);
5954 gimplify_and_add (lock, &tbody);
5955 gimple_bind_set_body (bind, tbody);
5956
5957 lower_omp (gimple_omp_body (stmt), ctx);
5958 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5959 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5960 gimple_omp_set_body (stmt, NULL);
5961
5962 tbody = gimple_bind_body (bind);
5963 gimplify_and_add (unlock, &tbody);
5964 gimple_bind_set_body (bind, tbody);
5965
5966 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5967
5968 pop_gimplify_context (bind);
5969 gimple_bind_append_vars (bind, ctx->block_vars);
5970 BLOCK_VARS (block) = gimple_bind_vars (bind);
5971 gsi_replace (gsi_p, bind, true);
5972 }
5973
5974
5975 /* A subroutine of lower_omp_for. Generate code to emit the predicate
5976 for a lastprivate clause. Given a loop control predicate of (V
5977 cond N2), we gate the clause on (!(V cond N2)). The lowered form
5978 is appended to *DLIST, iterator initialization is appended to
5979 *BODY_P. */
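/* For example (illustrative only), for "for (V = N1; V < N2; V += STEP)"
   this gates the copy-out in *DLIST with "if (V >= N2)" (or "if (V == N2)"
   when STEP is known to be +1 or -1) and prepends "V = N1;" to *BODY_P
   (or "V = 0;" when that is cheaper and still keeps the guard false), so
   threads that run no iteration skip the lastprivate copy-out.  */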
5980
5981 static void
5982 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
5983 gimple_seq *dlist, struct omp_context *ctx)
5984 {
5985 tree clauses, cond, vinit;
5986 enum tree_code cond_code;
5987 gimple_seq stmts;
5988
5989 cond_code = fd->loop.cond_code;
5990 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
5991
5992 /* When possible, use a strict equality expression. This can let
5993 VRP-type optimizations deduce the value and remove a copy. */
5994 if (host_integerp (fd->loop.step, 0))
5995 {
5996 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
5997 if (step == 1 || step == -1)
5998 cond_code = EQ_EXPR;
5999 }
6000
6001 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6002
6003 clauses = gimple_omp_for_clauses (fd->for_stmt);
6004 stmts = NULL;
6005 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6006 if (!gimple_seq_empty_p (stmts))
6007 {
6008 gimple_seq_add_seq (&stmts, *dlist);
6009 *dlist = stmts;
6010
6011 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6012 vinit = fd->loop.n1;
6013 if (cond_code == EQ_EXPR
6014 && host_integerp (fd->loop.n2, 0)
6015 && ! integer_zerop (fd->loop.n2))
6016 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6017
6018 /* Initialize the iterator variable, so that threads that don't execute
6019 any iterations don't execute the lastprivate clauses by accident. */
6020 gimplify_assign (fd->loop.v, vinit, body_p);
6021 }
6022 }
6023
6024
6025 /* Lower code for an OpenMP loop directive. */
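/* As a rough sketch (illustrative only), the sequence built below has the
   shape:

     <ilist: clause setup>  <pre-body>  <temporaries for the bounds/step>
     GIMPLE_OMP_FOR <clauses, lowered header>
       BODY
     GIMPLE_OMP_CONTINUE (V, V)
     <reductions>  <dlist: lastprivate copy-out, destructors>
     GIMPLE_OMP_RETURN <nowait if present>  */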
6026
6027 static void
6028 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6029 {
6030 tree *rhs_p, block;
6031 struct omp_for_data fd;
6032 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6033 gimple_seq omp_for_body, body, dlist;
6034 size_t i;
6035 struct gimplify_ctx gctx;
6036
6037 push_gimplify_context (&gctx);
6038
6039 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6040 lower_omp (gimple_omp_body (stmt), ctx);
6041
6042 block = make_node (BLOCK);
6043 new_stmt = gimple_build_bind (NULL, NULL, block);
6044
6045 /* Move declarations of temporaries out of the loop body before we make
6046 it go away. */
6047 omp_for_body = gimple_omp_body (stmt);
6048 if (!gimple_seq_empty_p (omp_for_body)
6049 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6050 {
6051 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6052 gimple_bind_append_vars (new_stmt, vars);
6053 }
6054
6055 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6056 dlist = NULL;
6057 body = NULL;
6058 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6059 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6060
6061 /* Lower the header expressions. At this point, we can assume that
6062 the header is of the form:
6063
6064 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6065
6066 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6067 using the .omp_data_s mapping, if needed. */
6068 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6069 {
6070 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6071 if (!is_gimple_min_invariant (*rhs_p))
6072 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6073
6074 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6075 if (!is_gimple_min_invariant (*rhs_p))
6076 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6077
6078 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6079 if (!is_gimple_min_invariant (*rhs_p))
6080 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6081 }
6082
6083 /* Once lowered, extract the bounds and clauses. */
6084 extract_omp_for_data (stmt, &fd, NULL);
6085
6086 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6087
6088 gimple_seq_add_stmt (&body, stmt);
6089 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6090
6091 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6092 fd.loop.v));
6093
6094 /* After the loop, add exit clauses. */
6095 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6096 gimple_seq_add_seq (&body, dlist);
6097
6098 body = maybe_catch_exception (body);
6099
6100 /* Region exit marker goes at the end of the loop body. */
6101 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6102
6103 pop_gimplify_context (new_stmt);
6104
6105 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6106 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6107 if (BLOCK_VARS (block))
6108 TREE_USED (block) = 1;
6109
6110 gimple_bind_set_body (new_stmt, body);
6111 gimple_omp_set_body (stmt, NULL);
6112 gimple_omp_for_set_pre_body (stmt, NULL);
6113 gsi_replace (gsi_p, new_stmt, true);
6114 }
6115
6116 /* Callback for walk_stmts. Check whether the walked statements contain
6117 nothing but a single GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
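/* For instance (illustrative only), a parallel body consisting of nothing
   but

     #pragma omp for
       for (...) ...

   leaves *INFO == 1 and the region is later marked as a combined
   parallel+workshare, whereas any extra statement or a second workshare
   construct forces *INFO to -1.  */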
6118
6119 static tree
6120 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6121 bool *handled_ops_p,
6122 struct walk_stmt_info *wi)
6123 {
6124 int *info = (int *) wi->info;
6125 gimple stmt = gsi_stmt (*gsi_p);
6126
6127 *handled_ops_p = true;
6128 switch (gimple_code (stmt))
6129 {
6130 WALK_SUBSTMTS;
6131
6132 case GIMPLE_OMP_FOR:
6133 case GIMPLE_OMP_SECTIONS:
6134 *info = *info == 0 ? 1 : -1;
6135 break;
6136 default:
6137 *info = -1;
6138 break;
6139 }
6140 return NULL;
6141 }
6142
6143 struct omp_taskcopy_context
6144 {
6145 /* This field must be at the beginning, as we do "inheritance": Some
6146 callback functions for tree-inline.c (e.g., omp_copy_decl)
6147 receive a copy_body_data pointer that is up-casted to an
6148 omp_context pointer. */
6149 copy_body_data cb;
6150 omp_context *ctx;
6151 };
6152
6153 static tree
6154 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6155 {
6156 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6157
6158 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6159 return create_tmp_var (TREE_TYPE (var), NULL);
6160
6161 return var;
6162 }
6163
6164 static tree
6165 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6166 {
6167 tree name, new_fields = NULL, type, f;
6168
6169 type = lang_hooks.types.make_type (RECORD_TYPE);
6170 name = DECL_NAME (TYPE_NAME (orig_type));
6171 name = build_decl (gimple_location (tcctx->ctx->stmt),
6172 TYPE_DECL, name, type);
6173 TYPE_NAME (type) = name;
6174
6175 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6176 {
6177 tree new_f = copy_node (f);
6178 DECL_CONTEXT (new_f) = type;
6179 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6180 TREE_CHAIN (new_f) = new_fields;
6181 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6182 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6183 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6184 &tcctx->cb, NULL);
6185 new_fields = new_f;
6186 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6187 }
6188 TYPE_FIELDS (type) = nreverse (new_fields);
6189 layout_type (type);
6190 return type;
6191 }
6192
6193 /* Create task copyfn. */
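/* Conceptually (an illustrative sketch; DST and SRC are made-up names for
   the two pointer arguments remapped below), the generated function looks
   like:

     void copyfn (struct <record_type> *dst, struct <srecord_type> *src)
     {
       <initialize temporaries used by variable-sized fields>
       dst->f = src->f;                        shared variable pointers
       dst->g = copy-constructed src->g;       non-VLA firstprivate vars
       <copy VLA firstprivates and point their fields at the copies>
       return;
     }  */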
6194
6195 static void
6196 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6197 {
6198 struct function *child_cfun;
6199 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6200 tree record_type, srecord_type, bind, list;
6201 bool record_needs_remap = false, srecord_needs_remap = false;
6202 splay_tree_node n;
6203 struct omp_taskcopy_context tcctx;
6204 struct gimplify_ctx gctx;
6205 location_t loc = gimple_location (task_stmt);
6206
6207 child_fn = gimple_omp_task_copy_fn (task_stmt);
6208 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6209 gcc_assert (child_cfun->cfg == NULL);
6210 child_cfun->dont_save_pending_sizes_p = 1;
6211 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6212
6213 /* Reset DECL_CONTEXT on function arguments. */
6214 for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
6215 DECL_CONTEXT (t) = child_fn;
6216
6217 /* Populate the function. */
6218 push_gimplify_context (&gctx);
6219 current_function_decl = child_fn;
6220
6221 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6222 TREE_SIDE_EFFECTS (bind) = 1;
6223 list = NULL;
6224 DECL_SAVED_TREE (child_fn) = bind;
6225 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6226
6227 /* Remap src and dst argument types if needed. */
6228 record_type = ctx->record_type;
6229 srecord_type = ctx->srecord_type;
6230 for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
6231 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6232 {
6233 record_needs_remap = true;
6234 break;
6235 }
6236 for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
6237 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6238 {
6239 srecord_needs_remap = true;
6240 break;
6241 }
6242
6243 if (record_needs_remap || srecord_needs_remap)
6244 {
6245 memset (&tcctx, '\0', sizeof (tcctx));
6246 tcctx.cb.src_fn = ctx->cb.src_fn;
6247 tcctx.cb.dst_fn = child_fn;
6248 tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
6249 tcctx.cb.dst_node = tcctx.cb.src_node;
6250 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6251 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6252 tcctx.cb.eh_lp_nr = 0;
6253 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6254 tcctx.cb.decl_map = pointer_map_create ();
6255 tcctx.ctx = ctx;
6256
6257 if (record_needs_remap)
6258 record_type = task_copyfn_remap_type (&tcctx, record_type);
6259 if (srecord_needs_remap)
6260 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6261 }
6262 else
6263 tcctx.cb.decl_map = NULL;
6264
6265 push_cfun (child_cfun);
6266
6267 arg = DECL_ARGUMENTS (child_fn);
6268 TREE_TYPE (arg) = build_pointer_type (record_type);
6269 sarg = TREE_CHAIN (arg);
6270 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6271
6272 /* First pass: initialize temporaries used in record_type and srecord_type
6273 sizes and field offsets. */
6274 if (tcctx.cb.decl_map)
6275 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6276 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6277 {
6278 tree *p;
6279
6280 decl = OMP_CLAUSE_DECL (c);
6281 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6282 if (p == NULL)
6283 continue;
6284 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6285 sf = (tree) n->value;
6286 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6287 src = build_simple_mem_ref_loc (loc, sarg);
6288 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6289 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6290 append_to_statement_list (t, &list);
6291 }
6292
6293 /* Second pass: copy shared var pointers and copy-construct non-VLA
6294 firstprivate vars. */
6295 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6296 switch (OMP_CLAUSE_CODE (c))
6297 {
6298 case OMP_CLAUSE_SHARED:
6299 decl = OMP_CLAUSE_DECL (c);
6300 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6301 if (n == NULL)
6302 break;
6303 f = (tree) n->value;
6304 if (tcctx.cb.decl_map)
6305 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6306 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6307 sf = (tree) n->value;
6308 if (tcctx.cb.decl_map)
6309 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6310 src = build_simple_mem_ref_loc (loc, sarg);
6311 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6312 dst = build_simple_mem_ref_loc (loc, arg);
6313 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6314 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6315 append_to_statement_list (t, &list);
6316 break;
6317 case OMP_CLAUSE_FIRSTPRIVATE:
6318 decl = OMP_CLAUSE_DECL (c);
6319 if (is_variable_sized (decl))
6320 break;
6321 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6322 if (n == NULL)
6323 break;
6324 f = (tree) n->value;
6325 if (tcctx.cb.decl_map)
6326 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6327 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6328 if (n != NULL)
6329 {
6330 sf = (tree) n->value;
6331 if (tcctx.cb.decl_map)
6332 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6333 src = build_simple_mem_ref_loc (loc, sarg);
6334 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6335 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6336 src = build_simple_mem_ref_loc (loc, src);
6337 }
6338 else
6339 src = decl;
6340 dst = build_simple_mem_ref_loc (loc, arg);
6341 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6342 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6343 append_to_statement_list (t, &list);
6344 break;
6345 case OMP_CLAUSE_PRIVATE:
6346 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6347 break;
6348 decl = OMP_CLAUSE_DECL (c);
6349 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6350 f = (tree) n->value;
6351 if (tcctx.cb.decl_map)
6352 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6353 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6354 if (n != NULL)
6355 {
6356 sf = (tree) n->value;
6357 if (tcctx.cb.decl_map)
6358 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6359 src = build_simple_mem_ref_loc (loc, sarg);
6360 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6361 if (use_pointer_for_field (decl, NULL))
6362 src = build_simple_mem_ref_loc (loc, src);
6363 }
6364 else
6365 src = decl;
6366 dst = build_simple_mem_ref_loc (loc, arg);
6367 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6368 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6369 append_to_statement_list (t, &list);
6370 break;
6371 default:
6372 break;
6373 }
6374
6375 /* Last pass: handle VLA firstprivates. */
6376 if (tcctx.cb.decl_map)
6377 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6378 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6379 {
6380 tree ind, ptr, df;
6381
6382 decl = OMP_CLAUSE_DECL (c);
6383 if (!is_variable_sized (decl))
6384 continue;
6385 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6386 if (n == NULL)
6387 continue;
6388 f = (tree) n->value;
6389 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6390 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6391 ind = DECL_VALUE_EXPR (decl);
6392 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6393 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6394 n = splay_tree_lookup (ctx->sfield_map,
6395 (splay_tree_key) TREE_OPERAND (ind, 0));
6396 sf = (tree) n->value;
6397 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6398 src = build_simple_mem_ref_loc (loc, sarg);
6399 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6400 src = build_simple_mem_ref_loc (loc, src);
6401 dst = build_simple_mem_ref_loc (loc, arg);
6402 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6403 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6404 append_to_statement_list (t, &list);
6405 n = splay_tree_lookup (ctx->field_map,
6406 (splay_tree_key) TREE_OPERAND (ind, 0));
6407 df = (tree) n->value;
6408 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6409 ptr = build_simple_mem_ref_loc (loc, arg);
6410 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6411 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6412 build_fold_addr_expr_loc (loc, dst));
6413 append_to_statement_list (t, &list);
6414 }
6415
6416 t = build1 (RETURN_EXPR, void_type_node, NULL);
6417 append_to_statement_list (t, &list);
6418
6419 if (tcctx.cb.decl_map)
6420 pointer_map_destroy (tcctx.cb.decl_map);
6421 pop_gimplify_context (NULL);
6422 BIND_EXPR_BODY (bind) = list;
6423 pop_cfun ();
6424 current_function_decl = ctx->cb.src_fn;
6425 }
6426
6427 /* Lower the OpenMP parallel or task directive in the current statement
6428 in GSI_P. CTX holds context information for the directive. */
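/* As a rough sketch (illustrative only), the lowering below results in:

     <ilist: .omp_data_o filled in from the send clauses>
     GIMPLE_OMP_PARALLEL / GIMPLE_OMP_TASK
       <receiver pointer> = &.omp_data_o;   if a mapping record is needed
       <par_ilist>  BODY  <par_olist>
       GIMPLE_OMP_RETURN
     <olist: copy-back from the sender record>

   all wrapped in a GIMPLE_BIND that replaces the original statement.  */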
6429
6430 static void
6431 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6432 {
6433 tree clauses;
6434 tree child_fn, t;
6435 gimple stmt = gsi_stmt (*gsi_p);
6436 gimple par_bind, bind;
6437 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6438 struct gimplify_ctx gctx;
6439 location_t loc = gimple_location (stmt);
6440
6441 clauses = gimple_omp_taskreg_clauses (stmt);
6442 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6443 par_body = gimple_bind_body (par_bind);
6444 child_fn = ctx->cb.dst_fn;
6445 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6446 && !gimple_omp_parallel_combined_p (stmt))
6447 {
6448 struct walk_stmt_info wi;
6449 int ws_num = 0;
6450
6451 memset (&wi, 0, sizeof (wi));
6452 wi.info = &ws_num;
6453 wi.val_only = true;
6454 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6455 if (ws_num == 1)
6456 gimple_omp_parallel_set_combined_p (stmt, true);
6457 }
6458 if (ctx->srecord_type)
6459 create_task_copyfn (stmt, ctx);
6460
6461 push_gimplify_context (&gctx);
6462
6463 par_olist = NULL;
6464 par_ilist = NULL;
6465 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6466 lower_omp (par_body, ctx);
6467 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6468 lower_reduction_clauses (clauses, &par_olist, ctx);
6469
6470 /* Declare all the variables created by mapping and the variables
6471 declared in the scope of the parallel body. */
6472 record_vars_into (ctx->block_vars, child_fn);
6473 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6474
6475 if (ctx->record_type)
6476 {
6477 ctx->sender_decl
6478 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6479 : ctx->record_type, ".omp_data_o");
6480 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6481 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6482 }
6483
6484 olist = NULL;
6485 ilist = NULL;
6486 lower_send_clauses (clauses, &ilist, &olist, ctx);
6487 lower_send_shared_vars (&ilist, &olist, ctx);
6488
6489 /* Once all the expansions are done, sequence all the different
6490 fragments inside gimple_omp_body. */
6491
6492 new_body = NULL;
6493
6494 if (ctx->record_type)
6495 {
6496 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6497 /* fixup_child_record_type might have changed receiver_decl's type. */
6498 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
6499 gimple_seq_add_stmt (&new_body,
6500 gimple_build_assign (ctx->receiver_decl, t));
6501 }
6502
6503 gimple_seq_add_seq (&new_body, par_ilist);
6504 gimple_seq_add_seq (&new_body, par_body);
6505 gimple_seq_add_seq (&new_body, par_olist);
6506 new_body = maybe_catch_exception (new_body);
6507 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6508 gimple_omp_set_body (stmt, new_body);
6509
6510 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6511 gimple_bind_add_stmt (bind, stmt);
6512 if (ilist || olist)
6513 {
6514 gimple_seq_add_stmt (&ilist, bind);
6515 gimple_seq_add_seq (&ilist, olist);
6516 bind = gimple_build_bind (NULL, ilist, NULL);
6517 }
6518
6519 gsi_replace (gsi_p, bind, true);
6520
6521 pop_gimplify_context (NULL);
6522 }
6523
6524 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6525 regimplified. If DATA is non-NULL, lower_omp_1 is being called outside
6526 of any OpenMP context, but with task_shared_vars set. */
6527
6528 static tree
6529 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6530 void *data)
6531 {
6532 tree t = *tp;
6533
6534 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6535 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6536 return t;
6537
6538 if (task_shared_vars
6539 && DECL_P (t)
6540 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6541 return t;
6542
6543 /* If a global variable has been privatized, TREE_CONSTANT on
6544 ADDR_EXPR might be wrong. */
6545 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6546 recompute_tree_invariant_for_addr_expr (t);
6547
6548 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6549 return NULL_TREE;
6550 }
6551
6552 static void
6553 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6554 {
6555 gimple stmt = gsi_stmt (*gsi_p);
6556 struct walk_stmt_info wi;
6557
6558 if (gimple_has_location (stmt))
6559 input_location = gimple_location (stmt);
6560
6561 if (task_shared_vars)
6562 memset (&wi, '\0', sizeof (wi));
6563
6564 /* If we have issued syntax errors, avoid doing any heavy lifting.
6565 Just replace the OpenMP directives with a NOP to avoid
6566 confusing RTL expansion. */
6567 if (seen_error () && is_gimple_omp (stmt))
6568 {
6569 gsi_replace (gsi_p, gimple_build_nop (), true);
6570 return;
6571 }
6572
6573 switch (gimple_code (stmt))
6574 {
6575 case GIMPLE_COND:
6576 if ((ctx || task_shared_vars)
6577 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6578 ctx ? NULL : &wi, NULL)
6579 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6580 ctx ? NULL : &wi, NULL)))
6581 gimple_regimplify_operands (stmt, gsi_p);
6582 break;
6583 case GIMPLE_CATCH:
6584 lower_omp (gimple_catch_handler (stmt), ctx);
6585 break;
6586 case GIMPLE_EH_FILTER:
6587 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6588 break;
6589 case GIMPLE_TRY:
6590 lower_omp (gimple_try_eval (stmt), ctx);
6591 lower_omp (gimple_try_cleanup (stmt), ctx);
6592 break;
6593 case GIMPLE_BIND:
6594 lower_omp (gimple_bind_body (stmt), ctx);
6595 break;
6596 case GIMPLE_OMP_PARALLEL:
6597 case GIMPLE_OMP_TASK:
6598 ctx = maybe_lookup_ctx (stmt);
6599 lower_omp_taskreg (gsi_p, ctx);
6600 break;
6601 case GIMPLE_OMP_FOR:
6602 ctx = maybe_lookup_ctx (stmt);
6603 gcc_assert (ctx);
6604 lower_omp_for (gsi_p, ctx);
6605 break;
6606 case GIMPLE_OMP_SECTIONS:
6607 ctx = maybe_lookup_ctx (stmt);
6608 gcc_assert (ctx);
6609 lower_omp_sections (gsi_p, ctx);
6610 break;
6611 case GIMPLE_OMP_SINGLE:
6612 ctx = maybe_lookup_ctx (stmt);
6613 gcc_assert (ctx);
6614 lower_omp_single (gsi_p, ctx);
6615 break;
6616 case GIMPLE_OMP_MASTER:
6617 ctx = maybe_lookup_ctx (stmt);
6618 gcc_assert (ctx);
6619 lower_omp_master (gsi_p, ctx);
6620 break;
6621 case GIMPLE_OMP_ORDERED:
6622 ctx = maybe_lookup_ctx (stmt);
6623 gcc_assert (ctx);
6624 lower_omp_ordered (gsi_p, ctx);
6625 break;
6626 case GIMPLE_OMP_CRITICAL:
6627 ctx = maybe_lookup_ctx (stmt);
6628 gcc_assert (ctx);
6629 lower_omp_critical (gsi_p, ctx);
6630 break;
6631 case GIMPLE_OMP_ATOMIC_LOAD:
6632 if ((ctx || task_shared_vars)
6633 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6634 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6635 gimple_regimplify_operands (stmt, gsi_p);
6636 break;
6637 default:
6638 if ((ctx || task_shared_vars)
6639 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6640 ctx ? NULL : &wi))
6641 gimple_regimplify_operands (stmt, gsi_p);
6642 break;
6643 }
6644 }
6645
6646 static void
6647 lower_omp (gimple_seq body, omp_context *ctx)
6648 {
6649 location_t saved_location = input_location;
6650 gimple_stmt_iterator gsi;
6651 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6652 lower_omp_1 (&gsi, ctx);
6653 input_location = saved_location;
6654 }
6655 \f
6656 /* Main entry point. */
6657
6658 static unsigned int
6659 execute_lower_omp (void)
6660 {
6661 gimple_seq body;
6662
6663 /* This pass always runs, to provide PROP_gimple_lomp.
6664 But there is nothing to do unless -fopenmp is given. */
6665 if (flag_openmp == 0)
6666 return 0;
6667
6668 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6669 delete_omp_context);
6670
6671 body = gimple_body (current_function_decl);
6672 scan_omp (body, NULL);
6673 gcc_assert (taskreg_nesting_level == 0);
6674
6675 if (all_contexts->root)
6676 {
6677 struct gimplify_ctx gctx;
6678
6679 if (task_shared_vars)
6680 push_gimplify_context (&gctx);
6681 lower_omp (body, NULL);
6682 if (task_shared_vars)
6683 pop_gimplify_context (NULL);
6684 }
6685
6686 if (all_contexts)
6687 {
6688 splay_tree_delete (all_contexts);
6689 all_contexts = NULL;
6690 }
6691 BITMAP_FREE (task_shared_vars);
6692 return 0;
6693 }
6694
6695 struct gimple_opt_pass pass_lower_omp =
6696 {
6697 {
6698 GIMPLE_PASS,
6699 "omplower", /* name */
6700 NULL, /* gate */
6701 execute_lower_omp, /* execute */
6702 NULL, /* sub */
6703 NULL, /* next */
6704 0, /* static_pass_number */
6705 TV_NONE, /* tv_id */
6706 PROP_gimple_any, /* properties_required */
6707 PROP_gimple_lomp, /* properties_provided */
6708 0, /* properties_destroyed */
6709 0, /* todo_flags_start */
6710 TODO_dump_func /* todo_flags_finish */
6711 }
6712 };
6713 \f
6714 /* The following is a utility to diagnose OpenMP structured block violations.
6715 It is not part of the "omplower" pass, as that's invoked too late. It
6716 should be invoked by the respective front ends after gimplification. */
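/* For instance (illustrative only), a branch such as

     #pragma omp parallel
       {
         ...
         goto fail;     <- leaves the structured block
       }
     fail: ...

   must be rejected, as must a branch into such a block from outside.  */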
6717
6718 static splay_tree all_labels;
6719
6720 /* Check for mismatched contexts and generate an error if needed. Return
6721 true if an error is detected. */
6722
6723 static bool
6724 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6725 gimple branch_ctx, gimple label_ctx)
6726 {
6727 if (label_ctx == branch_ctx)
6728 return false;
6729
6730
6731 /*
6732 Previously we kept track of the label's entire context in diagnose_sb_[12]
6733 so we could traverse it and issue a correct "exit" or "enter" error
6734 message upon a structured block violation.
6735
6736 We built the context as a list with tree_cons, but there is
6737 no easy counterpart in gimple tuples. It seems like far too much work
6738 for issuing exit/enter error messages. If someone really misses the
6739 distinct error message... patches welcome.
6740 */
6741
6742 #if 0
6743 /* Try to avoid confusing the user by producing and error message
6744 with correct "exit" or "enter" verbiage. We prefer "exit"
6745 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6746 if (branch_ctx == NULL)
6747 exit_p = false;
6748 else
6749 {
6750 while (label_ctx)
6751 {
6752 if (TREE_VALUE (label_ctx) == branch_ctx)
6753 {
6754 exit_p = false;
6755 break;
6756 }
6757 label_ctx = TREE_CHAIN (label_ctx);
6758 }
6759 }
6760
6761 if (exit_p)
6762 error ("invalid exit from OpenMP structured block");
6763 else
6764 error ("invalid entry to OpenMP structured block");
6765 #endif
6766
6767 /* If it's obvious we have an invalid entry, be specific about the error. */
6768 if (branch_ctx == NULL)
6769 error ("invalid entry to OpenMP structured block");
6770 else
6771 /* Otherwise, be vague and lazy, but efficient. */
6772 error ("invalid branch to/from an OpenMP structured block");
6773
6774 gsi_replace (gsi_p, gimple_build_nop (), false);
6775 return true;
6776 }
6777
6778 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6779 where each label is found. */
6780
6781 static tree
6782 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6783 struct walk_stmt_info *wi)
6784 {
6785 gimple context = (gimple) wi->info;
6786 gimple inner_context;
6787 gimple stmt = gsi_stmt (*gsi_p);
6788
6789 *handled_ops_p = true;
6790
6791 switch (gimple_code (stmt))
6792 {
6793 WALK_SUBSTMTS;
6794
6795 case GIMPLE_OMP_PARALLEL:
6796 case GIMPLE_OMP_TASK:
6797 case GIMPLE_OMP_SECTIONS:
6798 case GIMPLE_OMP_SINGLE:
6799 case GIMPLE_OMP_SECTION:
6800 case GIMPLE_OMP_MASTER:
6801 case GIMPLE_OMP_ORDERED:
6802 case GIMPLE_OMP_CRITICAL:
6803 /* The minimal context here is just the current OMP construct. */
6804 inner_context = stmt;
6805 wi->info = inner_context;
6806 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6807 wi->info = context;
6808 break;
6809
6810 case GIMPLE_OMP_FOR:
6811 inner_context = stmt;
6812 wi->info = inner_context;
6813 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6814 walk them. */
6815 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6816 diagnose_sb_1, NULL, wi);
6817 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6818 wi->info = context;
6819 break;
6820
6821 case GIMPLE_LABEL:
6822 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6823 (splay_tree_value) context);
6824 break;
6825
6826 default:
6827 break;
6828 }
6829
6830 return NULL_TREE;
6831 }
6832
6833 /* Pass 2: Check each branch and see if its context differs from the
6834 context of its destination label. */
6835
6836 static tree
6837 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6838 struct walk_stmt_info *wi)
6839 {
6840 gimple context = (gimple) wi->info;
6841 splay_tree_node n;
6842 gimple stmt = gsi_stmt (*gsi_p);
6843
6844 *handled_ops_p = true;
6845
6846 switch (gimple_code (stmt))
6847 {
6848 WALK_SUBSTMTS;
6849
6850 case GIMPLE_OMP_PARALLEL:
6851 case GIMPLE_OMP_TASK:
6852 case GIMPLE_OMP_SECTIONS:
6853 case GIMPLE_OMP_SINGLE:
6854 case GIMPLE_OMP_SECTION:
6855 case GIMPLE_OMP_MASTER:
6856 case GIMPLE_OMP_ORDERED:
6857 case GIMPLE_OMP_CRITICAL:
6858 wi->info = stmt;
6859 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6860 wi->info = context;
6861 break;
6862
6863 case GIMPLE_OMP_FOR:
6864 wi->info = stmt;
6865 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6866 walk them. */
6867 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6868 diagnose_sb_2, NULL, wi);
6869 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6870 wi->info = context;
6871 break;
6872
6873 case GIMPLE_COND:
6874 {
6875 tree lab = gimple_cond_true_label (stmt);
6876 if (lab)
6877 {
6878 n = splay_tree_lookup (all_labels,
6879 (splay_tree_key) lab);
6880 diagnose_sb_0 (gsi_p, context,
6881 n ? (gimple) n->value : NULL);
6882 }
6883 lab = gimple_cond_false_label (stmt);
6884 if (lab)
6885 {
6886 n = splay_tree_lookup (all_labels,
6887 (splay_tree_key) lab);
6888 diagnose_sb_0 (gsi_p, context,
6889 n ? (gimple) n->value : NULL);
6890 }
6891 }
6892 break;
6893
6894 case GIMPLE_GOTO:
6895 {
6896 tree lab = gimple_goto_dest (stmt);
6897 if (TREE_CODE (lab) != LABEL_DECL)
6898 break;
6899
6900 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6901 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
6902 }
6903 break;
6904
6905 case GIMPLE_SWITCH:
6906 {
6907 unsigned int i;
6908 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
6909 {
6910 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
6911 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6912 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
6913 break;
6914 }
6915 }
6916 break;
6917
6918 case GIMPLE_RETURN:
6919 diagnose_sb_0 (gsi_p, context, NULL);
6920 break;
6921
6922 default:
6923 break;
6924 }
6925
6926 return NULL_TREE;
6927 }
6928
6929 static unsigned int
6930 diagnose_omp_structured_block_errors (void)
6931 {
6932 struct walk_stmt_info wi;
6933 gimple_seq body = gimple_body (current_function_decl);
6934
6935 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6936
6937 memset (&wi, 0, sizeof (wi));
6938 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
6939
6940 memset (&wi, 0, sizeof (wi));
6941 wi.want_locations = true;
6942 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
6943
6944 splay_tree_delete (all_labels);
6945 all_labels = NULL;
6946
6947 return 0;
6948 }
6949
6950 static bool
6951 gate_diagnose_omp_blocks (void)
6952 {
6953 return flag_openmp != 0;
6954 }
6955
6956 struct gimple_opt_pass pass_diagnose_omp_blocks =
6957 {
6958 {
6959 GIMPLE_PASS,
6960 "*diagnose_omp_blocks", /* name */
6961 gate_diagnose_omp_blocks, /* gate */
6962 diagnose_omp_structured_block_errors, /* execute */
6963 NULL, /* sub */
6964 NULL, /* next */
6965 0, /* static_pass_number */
6966 TV_NONE, /* tv_id */
6967 PROP_gimple_any, /* properties_required */
6968 0, /* properties_provided */
6969 0, /* properties_destroyed */
6970 0, /* todo_flags_start */
6971 0, /* todo_flags_finish */
6972 }
6973 };
6974
6975 #include "gt-omp-low.h"