re PR libgomp/49490 (suboptimal load balancing in loops)
[gcc.git] / gcc / omp-low.c
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011
7 Free Software Foundation, Inc.
8
9 This file is part of GCC.
10
11 GCC is free software; you can redistribute it and/or modify it under
12 the terms of the GNU General Public License as published by the Free
13 Software Foundation; either version 3, or (at your option) any later
14 version.
15
16 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17 WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING3. If not see
23 <http://www.gnu.org/licenses/>. */
24
25 #include "config.h"
26 #include "system.h"
27 #include "coretypes.h"
28 #include "tm.h"
29 #include "tree.h"
30 #include "rtl.h"
31 #include "gimple.h"
32 #include "tree-iterator.h"
33 #include "tree-inline.h"
34 #include "langhooks.h"
35 #include "diagnostic-core.h"
36 #include "tree-flow.h"
37 #include "timevar.h"
38 #include "flags.h"
39 #include "function.h"
40 #include "expr.h"
41 #include "tree-pass.h"
42 #include "ggc.h"
43 #include "except.h"
44 #include "splay-tree.h"
45 #include "optabs.h"
46 #include "cfgloop.h"
47
48
49 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
50 phases. The first phase scans the function looking for OMP statements
51 and then for variables that must be replaced to satisfy data sharing
52 clauses. The second phase expands code for the constructs, as well as
53 re-gimplifying things when variables have been replaced with complex
54 expressions.
55
56 Final code generation is done by pass_expand_omp. The flowgraph is
57 scanned for parallel regions which are then moved to a new
58 function, to be invoked by the thread library. */
59
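/* As a rough sketch of the end result (illustrative, not an actual
   compiler dump), a directive such as

       #pragma omp parallel firstprivate (a)
         foo (a);

   is outlined into a child function and the directive itself is
   replaced by calls along the lines of

       .omp_data_o.a = a;
       __builtin_GOMP_parallel_start (main._omp_fn.0, &.omp_data_o, 0);
       main._omp_fn.0 (&.omp_data_o);
       __builtin_GOMP_parallel_end ();

   where main._omp_fn.0 is the outlined child function and the
   .omp_data_s/.omp_data_o record marshals the shared and firstprivate
   variables, as described by the contexts below.  */
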
60 /* Context structure. Used to store information about each parallel
61 directive in the code. */
62
63 typedef struct omp_context
64 {
65 /* This field must be at the beginning, as we do "inheritance": Some
66 callback functions for tree-inline.c (e.g., omp_copy_decl)
67 receive a copy_body_data pointer that is up-casted to an
68 omp_context pointer. */
69 copy_body_data cb;
70
71 /* The tree of contexts corresponding to the encountered constructs. */
72 struct omp_context *outer;
73 gimple stmt;
74
75 /* Map variables to fields in a structure that allows communication
76 between sending and receiving threads. */
77 splay_tree field_map;
78 tree record_type;
79 tree sender_decl;
80 tree receiver_decl;
81
82 /* These are used only by task contexts, when a task firstprivate fn is
83 needed. srecord_type is used to communicate from the thread that
84 encountered the task construct to the task firstprivate fn;
85 record_type is allocated by GOMP_task, initialized by the task
86 firstprivate fn and passed to the task body fn. */
87 splay_tree sfield_map;
88 tree srecord_type;
89
90 /* A chain of variables to add to the top-level block surrounding the
91 construct. In the case of a parallel, this is in the child function. */
92 tree block_vars;
93
94 /* What to do with variables with implicitly determined sharing
95 attributes. */
96 enum omp_clause_default_kind default_kind;
97
98 /* Nesting depth of this context. Used to beautify error messages re
99 invalid gotos. The outermost ctx is depth 1, with depth 0 being
100 reserved for the main body of the function. */
101 int depth;
102
103 /* True if this parallel directive is nested within another. */
104 bool is_nested;
105 } omp_context;
106
107
108 struct omp_for_data_loop
109 {
110 tree v, n1, n2, step;
111 enum tree_code cond_code;
112 };
113
114 /* A structure describing the main elements of a parallel loop. */
115
116 struct omp_for_data
117 {
118 struct omp_for_data_loop loop;
119 tree chunk_size;
120 gimple for_stmt;
121 tree pre, iter_type;
122 int collapse;
123 bool have_nowait, have_ordered;
124 enum omp_clause_schedule_kind sched_kind;
125 struct omp_for_data_loop *loops;
126 };
127
128
129 static splay_tree all_contexts;
130 static int taskreg_nesting_level;
131 struct omp_region *root_omp_region;
132 static bitmap task_shared_vars;
133
134 static void scan_omp (gimple_seq, omp_context *);
135 static tree scan_omp_1_op (tree *, int *, void *);
136
137 #define WALK_SUBSTMTS \
138 case GIMPLE_BIND: \
139 case GIMPLE_TRY: \
140 case GIMPLE_CATCH: \
141 case GIMPLE_EH_FILTER: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
145
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
147
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
150 {
151 struct walk_stmt_info wi;
152
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
156
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
158 }
159
160 static void lower_omp (gimple_seq, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
163
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
165
166 tree
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
168 {
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
171 return clauses;
172
173 return NULL_TREE;
174 }
175
176 /* Return true if CTX is for an omp parallel. */
177
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
180 {
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
182 }
183
184
185 /* Return true if CTX is for an omp task. */
186
187 static inline bool
188 is_task_ctx (omp_context *ctx)
189 {
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
191 }
192
193
194 /* Return true if CTX is for an omp parallel or omp task. */
195
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
198 {
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
201 }
202
203
204 /* Return true if REGION is a combined parallel+workshare region. */
205
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
208 {
209 return region->is_combined_parallel;
210 }
211
212
213 /* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
215
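/* Note that FD->loop describes the logical iteration space: for
   collapsed loop nests (FD->collapse > 1) the individual loops are
   recorded in LOOPS and, at the end of this function, FD->loop is
   rewritten to run from 0 to the total iteration count with step 1.  */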
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
219 {
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
225 location_t loc = gimple_location (for_stmt);
226
227 fd->for_stmt = for_stmt;
228 fd->pre = NULL;
229 fd->collapse = gimple_omp_for_collapse (for_stmt);
230 if (fd->collapse > 1)
231 fd->loops = loops;
232 else
233 fd->loops = &fd->loop;
234
235 fd->have_nowait = fd->have_ordered = false;
236 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
237 fd->chunk_size = NULL_TREE;
238 collapse_iter = NULL;
239 collapse_count = NULL;
240
241 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
242 switch (OMP_CLAUSE_CODE (t))
243 {
244 case OMP_CLAUSE_NOWAIT:
245 fd->have_nowait = true;
246 break;
247 case OMP_CLAUSE_ORDERED:
248 fd->have_ordered = true;
249 break;
250 case OMP_CLAUSE_SCHEDULE:
251 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
252 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
253 break;
254 case OMP_CLAUSE_COLLAPSE:
255 if (fd->collapse > 1)
256 {
257 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
258 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
259 }
260 default:
261 break;
262 }
263
264 /* FIXME: for now map schedule(auto) to schedule(static).
265 There should be analysis to determine whether all iterations
266 are approximately the same amount of work (then schedule(static)
267 is best) or if it varies (then schedule(dynamic,N) is better). */
268 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
269 {
270 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
271 gcc_assert (fd->chunk_size == NULL);
272 }
273 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
274 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
275 gcc_assert (fd->chunk_size == NULL);
276 else if (fd->chunk_size == NULL)
277 {
278 /* We only need to compute a default chunk size for ordered
279 static loops and dynamic loops. */
280 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
281 || fd->have_ordered
282 || fd->collapse > 1)
283 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
284 ? integer_zero_node : integer_one_node;
285 }
286
287 for (i = 0; i < fd->collapse; i++)
288 {
289 if (fd->collapse == 1)
290 loop = &fd->loop;
291 else if (loops != NULL)
292 loop = loops + i;
293 else
294 loop = &dummy_loop;
295
296
297 loop->v = gimple_omp_for_index (for_stmt, i);
298 gcc_assert (SSA_VAR_P (loop->v));
299 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
300 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
301 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
302 loop->n1 = gimple_omp_for_initial (for_stmt, i);
303
304 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
305 loop->n2 = gimple_omp_for_final (for_stmt, i);
306 switch (loop->cond_code)
307 {
308 case LT_EXPR:
309 case GT_EXPR:
310 break;
311 case LE_EXPR:
312 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
313 loop->n2 = fold_build2_loc (loc,
314 POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
315 loop->n2, size_one_node);
316 else
317 loop->n2 = fold_build2_loc (loc,
318 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
319 build_int_cst (TREE_TYPE (loop->n2), 1));
320 loop->cond_code = LT_EXPR;
321 break;
322 case GE_EXPR:
323 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
324 loop->n2 = fold_build2_loc (loc,
325 POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
326 loop->n2, size_int (-1));
327 else
328 loop->n2 = fold_build2_loc (loc,
329 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
330 build_int_cst (TREE_TYPE (loop->n2), 1));
331 loop->cond_code = GT_EXPR;
332 break;
333 default:
334 gcc_unreachable ();
335 }
336
337 t = gimple_omp_for_incr (for_stmt, i);
338 gcc_assert (TREE_OPERAND (t, 0) == var);
339 switch (TREE_CODE (t))
340 {
341 case PLUS_EXPR:
342 case POINTER_PLUS_EXPR:
343 loop->step = TREE_OPERAND (t, 1);
344 break;
345 case MINUS_EXPR:
346 loop->step = TREE_OPERAND (t, 1);
347 loop->step = fold_build1_loc (loc,
348 NEGATE_EXPR, TREE_TYPE (loop->step),
349 loop->step);
350 break;
351 default:
352 gcc_unreachable ();
353 }
354
355 if (iter_type != long_long_unsigned_type_node)
356 {
357 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
358 iter_type = long_long_unsigned_type_node;
359 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
360 && TYPE_PRECISION (TREE_TYPE (loop->v))
361 >= TYPE_PRECISION (iter_type))
362 {
363 tree n;
364
365 if (loop->cond_code == LT_EXPR)
366 n = fold_build2_loc (loc,
367 PLUS_EXPR, TREE_TYPE (loop->v),
368 loop->n2, loop->step);
369 else
370 n = loop->n1;
371 if (TREE_CODE (n) != INTEGER_CST
372 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
373 iter_type = long_long_unsigned_type_node;
374 }
375 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
376 > TYPE_PRECISION (iter_type))
377 {
378 tree n1, n2;
379
380 if (loop->cond_code == LT_EXPR)
381 {
382 n1 = loop->n1;
383 n2 = fold_build2_loc (loc,
384 PLUS_EXPR, TREE_TYPE (loop->v),
385 loop->n2, loop->step);
386 }
387 else
388 {
389 n1 = fold_build2_loc (loc,
390 MINUS_EXPR, TREE_TYPE (loop->v),
391 loop->n2, loop->step);
392 n2 = loop->n1;
393 }
394 if (TREE_CODE (n1) != INTEGER_CST
395 || TREE_CODE (n2) != INTEGER_CST
396 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
397 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
398 iter_type = long_long_unsigned_type_node;
399 }
400 }
401
402 if (collapse_count && *collapse_count == NULL)
403 {
404 if ((i == 0 || count != NULL_TREE)
405 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
406 && TREE_CONSTANT (loop->n1)
407 && TREE_CONSTANT (loop->n2)
408 && TREE_CODE (loop->step) == INTEGER_CST)
409 {
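/* The expression built below computes the logical iteration count as
   (step + (cond_code == LT_EXPR ? -1 : 1) + n2 - n1) / step with
   truncating division.  As a worked example (illustrative only),
   for (i = 0; i < 10; i += 3) gives (3 + -1 + 10 - 0) / 3 = 4
   iterations; the counts of the individual loops in a collapsed nest
   are then multiplied together into COUNT.  */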
410 tree itype = TREE_TYPE (loop->v);
411
412 if (POINTER_TYPE_P (itype))
413 itype
414 = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
415 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
416 t = fold_build2_loc (loc,
417 PLUS_EXPR, itype,
418 fold_convert_loc (loc, itype, loop->step), t);
419 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
420 fold_convert_loc (loc, itype, loop->n2));
421 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
422 fold_convert_loc (loc, itype, loop->n1));
423 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
424 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
425 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
426 fold_build1_loc (loc, NEGATE_EXPR, itype,
427 fold_convert_loc (loc, itype,
428 loop->step)));
429 else
430 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
431 fold_convert_loc (loc, itype, loop->step));
432 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
433 if (count != NULL_TREE)
434 count = fold_build2_loc (loc,
435 MULT_EXPR, long_long_unsigned_type_node,
436 count, t);
437 else
438 count = t;
439 if (TREE_CODE (count) != INTEGER_CST)
440 count = NULL_TREE;
441 }
442 else
443 count = NULL_TREE;
444 }
445 }
446
447 if (count)
448 {
449 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
450 iter_type = long_long_unsigned_type_node;
451 else
452 iter_type = long_integer_type_node;
453 }
454 else if (collapse_iter && *collapse_iter != NULL)
455 iter_type = TREE_TYPE (*collapse_iter);
456 fd->iter_type = iter_type;
457 if (collapse_iter && *collapse_iter == NULL)
458 *collapse_iter = create_tmp_var (iter_type, ".iter");
459 if (collapse_count && *collapse_count == NULL)
460 {
461 if (count)
462 *collapse_count = fold_convert_loc (loc, iter_type, count);
463 else
464 *collapse_count = create_tmp_var (iter_type, ".count");
465 }
466
467 if (fd->collapse > 1)
468 {
469 fd->loop.v = *collapse_iter;
470 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
471 fd->loop.n2 = *collapse_count;
472 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
473 fd->loop.cond_code = LT_EXPR;
474 }
475 }
476
477
478 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
479 is the immediate dominator of PAR_ENTRY_BB, return true if there
480 are no data dependencies that would prevent expanding the parallel
481 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
482
483 When expanding a combined parallel+workshare region, the call to
484 the child function may need additional arguments in the case of
485 GIMPLE_OMP_FOR regions. In some cases, these arguments are
486 computed out of variables passed in from the parent to the child
487 via 'struct .omp_data_s'. For instance:
488
489 #pragma omp parallel for schedule (guided, i * 4)
490 for (j ...)
491
492 Is lowered into:
493
494 # BLOCK 2 (PAR_ENTRY_BB)
495 .omp_data_o.i = i;
496 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
497
498 # BLOCK 3 (WS_ENTRY_BB)
499 .omp_data_i = &.omp_data_o;
500 D.1667 = .omp_data_i->i;
501 D.1598 = D.1667 * 4;
502 #pragma omp for schedule (guided, D.1598)
503
504 When we outline the parallel region, the call to the child function
505 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
506 that value is computed *after* the call site. So, in principle we
507 cannot do the transformation.
508
509 To see whether the code in WS_ENTRY_BB blocks the combined
510 parallel+workshare call, we collect all the variables used in the
511 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
512 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
513 call.
514
515 FIXME. If we had the SSA form built at this point, we could merely
516 hoist the code in block 3 into block 2 and be done with it. But at
517 this point we don't have dataflow information and though we could
518 hack something up here, it is really not worth the aggravation. */
519
520 static bool
521 workshare_safe_to_combine_p (basic_block ws_entry_bb)
522 {
523 struct omp_for_data fd;
524 gimple ws_stmt = last_stmt (ws_entry_bb);
525
526 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
527 return true;
528
529 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
530
531 extract_omp_for_data (ws_stmt, &fd, NULL);
532
533 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
534 return false;
535 if (fd.iter_type != long_integer_type_node)
536 return false;
537
538 /* FIXME. We give up too easily here. If any of these arguments
539 are not constants, they will likely involve variables that have
540 been mapped into fields of .omp_data_s for sharing with the child
541 function. With appropriate data flow, it would be possible to
542 see through this. */
543 if (!is_gimple_min_invariant (fd.loop.n1)
544 || !is_gimple_min_invariant (fd.loop.n2)
545 || !is_gimple_min_invariant (fd.loop.step)
546 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
547 return false;
548
549 return true;
550 }
551
552
553 /* Collect additional arguments needed to emit a combined
554 parallel+workshare call. WS_STMT is the workshare directive being
555 expanded. */
556
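/* As an illustration (a sketch of the common case rather than a dump):
   for a combined "#pragma omp parallel for schedule (dynamic, 4)"
   iterating i over [0, n) with unit stride, the vector built below
   holds 0, n, 1 and 4, i.e. the start, end, increment and chunk-size
   arguments that the combined GOMP_parallel_loop_*_start entry points
   in libgomp expect after the child function and data arguments.  */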
557 static VEC(tree,gc) *
558 get_ws_args_for (gimple ws_stmt)
559 {
560 tree t;
561 location_t loc = gimple_location (ws_stmt);
562 VEC(tree,gc) *ws_args;
563
564 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
565 {
566 struct omp_for_data fd;
567
568 extract_omp_for_data (ws_stmt, &fd, NULL);
569
570 ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));
571
572 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
573 VEC_quick_push (tree, ws_args, t);
574
575 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
576 VEC_quick_push (tree, ws_args, t);
577
578 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
579 VEC_quick_push (tree, ws_args, t);
580
581 if (fd.chunk_size)
582 {
583 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
584 VEC_quick_push (tree, ws_args, t);
585 }
586
587 return ws_args;
588 }
589 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
590 {
591 /* Number of sections is equal to the number of edges from the
592 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
593 the exit of the sections region. */
594 basic_block bb = single_succ (gimple_bb (ws_stmt));
595 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
596 ws_args = VEC_alloc (tree, gc, 1);
597 VEC_quick_push (tree, ws_args, t);
598 return ws_args;
599 }
600
601 gcc_unreachable ();
602 }
603
604
605 /* Discover whether REGION is a combined parallel+workshare region. */
606
607 static void
608 determine_parallel_type (struct omp_region *region)
609 {
610 basic_block par_entry_bb, par_exit_bb;
611 basic_block ws_entry_bb, ws_exit_bb;
612
613 if (region == NULL || region->inner == NULL
614 || region->exit == NULL || region->inner->exit == NULL
615 || region->inner->cont == NULL)
616 return;
617
618 /* We only support parallel+for and parallel+sections. */
619 if (region->type != GIMPLE_OMP_PARALLEL
620 || (region->inner->type != GIMPLE_OMP_FOR
621 && region->inner->type != GIMPLE_OMP_SECTIONS))
622 return;
623
624 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
625 WS_EXIT_BB -> PAR_EXIT_BB. */
626 par_entry_bb = region->entry;
627 par_exit_bb = region->exit;
628 ws_entry_bb = region->inner->entry;
629 ws_exit_bb = region->inner->exit;
630
631 if (single_succ (par_entry_bb) == ws_entry_bb
632 && single_succ (ws_exit_bb) == par_exit_bb
633 && workshare_safe_to_combine_p (ws_entry_bb)
634 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
635 || (last_and_only_stmt (ws_entry_bb)
636 && last_and_only_stmt (par_exit_bb))))
637 {
638 gimple ws_stmt = last_stmt (ws_entry_bb);
639
640 if (region->inner->type == GIMPLE_OMP_FOR)
641 {
642 /* If this is a combined parallel loop, we need to determine
643 whether or not to use the combined library calls. There
644 are two cases where we do not apply the transformation:
645 static loops and any kind of ordered loop. In the first
646 case, we already open code the loop so there is no need
647 to do anything else. In the latter case, the combined
648 parallel loop call would still need extra synchronization
649 to implement ordered semantics, so there would not be any
650 gain in using the combined call. */
651 tree clauses = gimple_omp_for_clauses (ws_stmt);
652 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
653 if (c == NULL
654 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
655 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
656 {
657 region->is_combined_parallel = false;
658 region->inner->is_combined_parallel = false;
659 return;
660 }
661 }
662
663 region->is_combined_parallel = true;
664 region->inner->is_combined_parallel = true;
665 region->ws_args = get_ws_args_for (ws_stmt);
666 }
667 }
668
669
670 /* Return true if EXPR is variable sized. */
671
672 static inline bool
673 is_variable_sized (const_tree expr)
674 {
675 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
676 }
677
678 /* Return true if DECL is a reference type. */
679
680 static inline bool
681 is_reference (tree decl)
682 {
683 return lang_hooks.decls.omp_privatize_by_reference (decl);
684 }
685
686 /* Look up variables in the decl or field splay trees. The "maybe" form
687 allows the variable to not have been entered; otherwise we
688 assert that the variable has been entered. */
689
690 static inline tree
691 lookup_decl (tree var, omp_context *ctx)
692 {
693 tree *n;
694 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
695 return *n;
696 }
697
698 static inline tree
699 maybe_lookup_decl (const_tree var, omp_context *ctx)
700 {
701 tree *n;
702 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
703 return n ? *n : NULL_TREE;
704 }
705
706 static inline tree
707 lookup_field (tree var, omp_context *ctx)
708 {
709 splay_tree_node n;
710 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
711 return (tree) n->value;
712 }
713
714 static inline tree
715 lookup_sfield (tree var, omp_context *ctx)
716 {
717 splay_tree_node n;
718 n = splay_tree_lookup (ctx->sfield_map
719 ? ctx->sfield_map : ctx->field_map,
720 (splay_tree_key) var);
721 return (tree) n->value;
722 }
723
724 static inline tree
725 maybe_lookup_field (tree var, omp_context *ctx)
726 {
727 splay_tree_node n;
728 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
729 return n ? (tree) n->value : NULL_TREE;
730 }
731
732 /* Return true if DECL should be copied by pointer. SHARED_CTX is
733 the parallel context if DECL is to be shared. */
734
735 static bool
736 use_pointer_for_field (tree decl, omp_context *shared_ctx)
737 {
738 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
739 return true;
740
741 /* We can only use copy-in/copy-out semantics for shared variables
742 when we know the value is not accessible from an outer scope. */
743 if (shared_ctx)
744 {
745 /* ??? Trivially accessible from anywhere. But why would we even
746 be passing an address in this case? Should we simply assert
747 this to be false, or should we have a cleanup pass that removes
748 these from the list of mappings? */
749 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
750 return true;
751
752 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
753 without analyzing the expression whether or not its location
754 is accessible to anyone else. In the case of nested parallel
755 regions it certainly may be. */
756 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
757 return true;
758
759 /* Do not use copy-in/copy-out for variables that have their
760 address taken. */
761 if (TREE_ADDRESSABLE (decl))
762 return true;
763
764 /* Disallow copy-in/out in nested parallel if
765 decl is shared in outer parallel, otherwise
766 each thread could store the shared variable
767 in its own copy-in location, making the
768 variable no longer really shared. */
769 if (!TREE_READONLY (decl) && shared_ctx->is_nested)
770 {
771 omp_context *up;
772
773 for (up = shared_ctx->outer; up; up = up->outer)
774 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
775 break;
776
777 if (up)
778 {
779 tree c;
780
781 for (c = gimple_omp_taskreg_clauses (up->stmt);
782 c; c = OMP_CLAUSE_CHAIN (c))
783 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
784 && OMP_CLAUSE_DECL (c) == decl)
785 break;
786
787 if (c)
788 return true;
789 }
790 }
791
792 /* For tasks avoid using copy-in/out, unless they are readonly
793 (in which case just copy-in is used). As tasks can be
794 deferred or executed in a different thread, when GOMP_task
795 returns, the task hasn't necessarily terminated. */
796 if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
797 {
798 tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
799 if (is_gimple_reg (outer))
800 {
801 /* Taking address of OUTER in lower_send_shared_vars
802 might need regimplification of everything that uses the
803 variable. */
804 if (!task_shared_vars)
805 task_shared_vars = BITMAP_ALLOC (NULL);
806 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
807 TREE_ADDRESSABLE (outer) = 1;
808 }
809 return true;
810 }
811 }
812
813 return false;
814 }
815
816 /* Create a new VAR_DECL and copy information from VAR to it. */
817
818 tree
819 copy_var_decl (tree var, tree name, tree type)
820 {
821 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
822
823 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
824 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
825 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
826 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
827 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
828 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
829 TREE_USED (copy) = 1;
830 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
831
832 return copy;
833 }
834
835 /* Construct a new automatic decl similar to VAR. */
836
837 static tree
838 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
839 {
840 tree copy = copy_var_decl (var, name, type);
841
842 DECL_CONTEXT (copy) = current_function_decl;
843 DECL_CHAIN (copy) = ctx->block_vars;
844 ctx->block_vars = copy;
845
846 return copy;
847 }
848
849 static tree
850 omp_copy_decl_1 (tree var, omp_context *ctx)
851 {
852 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
853 }
854
855 /* Build tree nodes to access the field for VAR on the receiver side. */
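/* For a variable x this builds roughly ".omp_data_i->x", or
   "*.omp_data_i->x" when the field holds x's address (BY_REF is true);
   ".omp_data_i" stands for the receiver decl set up in
   create_omp_child_function (illustrative notation only).  */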
856
857 static tree
858 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
859 {
860 tree x, field = lookup_field (var, ctx);
861
862 /* If the receiver record type was remapped in the child function,
863 remap the field into the new record type. */
864 x = maybe_lookup_field (field, ctx);
865 if (x != NULL)
866 field = x;
867
868 x = build_simple_mem_ref (ctx->receiver_decl);
869 x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
870 if (by_ref)
871 x = build_simple_mem_ref (x);
872
873 return x;
874 }
875
876 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
877 of a parallel, this is a component reference; for workshare constructs
878 this is some variable. */
879
880 static tree
881 build_outer_var_ref (tree var, omp_context *ctx)
882 {
883 tree x;
884
885 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
886 x = var;
887 else if (is_variable_sized (var))
888 {
889 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
890 x = build_outer_var_ref (x, ctx);
891 x = build_simple_mem_ref (x);
892 }
893 else if (is_taskreg_ctx (ctx))
894 {
895 bool by_ref = use_pointer_for_field (var, NULL);
896 x = build_receiver_ref (var, by_ref, ctx);
897 }
898 else if (ctx->outer)
899 x = lookup_decl (var, ctx->outer);
900 else if (is_reference (var))
901 /* This can happen with orphaned constructs. If var is reference, it is
902 possible it is shared and as such valid. */
903 x = var;
904 else
905 gcc_unreachable ();
906
907 if (is_reference (var))
908 x = build_simple_mem_ref (x);
909
910 return x;
911 }
912
913 /* Build tree nodes to access the field for VAR on the sender side. */
914
915 static tree
916 build_sender_ref (tree var, omp_context *ctx)
917 {
918 tree field = lookup_sfield (var, ctx);
919 return build3 (COMPONENT_REF, TREE_TYPE (field),
920 ctx->sender_decl, field, NULL);
921 }
922
923 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
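/* MASK selects where the field is installed (as can be read off the
   code below): bit 0 (value 1) creates a field in CTX->RECORD_TYPE,
   found later through CTX->FIELD_MAP; bit 1 (value 2) creates one in
   CTX->SRECORD_TYPE through CTX->SFIELD_MAP, which only exists for
   task constructs that need a separate firstprivate copy function.
   A MASK of 3 requests both.  */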
924
925 static void
926 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
927 {
928 tree field, type, sfield = NULL_TREE;
929
930 gcc_assert ((mask & 1) == 0
931 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
932 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
933 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
934
935 type = TREE_TYPE (var);
936 if (by_ref)
937 type = build_pointer_type (type);
938 else if ((mask & 3) == 1 && is_reference (var))
939 type = TREE_TYPE (type);
940
941 field = build_decl (DECL_SOURCE_LOCATION (var),
942 FIELD_DECL, DECL_NAME (var), type);
943
944 /* Remember what variable this field was created for. This does have a
945 side effect of making dwarf2out ignore this member, so for helpful
946 debugging we clear it later in delete_omp_context. */
947 DECL_ABSTRACT_ORIGIN (field) = var;
948 if (type == TREE_TYPE (var))
949 {
950 DECL_ALIGN (field) = DECL_ALIGN (var);
951 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
952 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
953 }
954 else
955 DECL_ALIGN (field) = TYPE_ALIGN (type);
956
957 if ((mask & 3) == 3)
958 {
959 insert_field_into_struct (ctx->record_type, field);
960 if (ctx->srecord_type)
961 {
962 sfield = build_decl (DECL_SOURCE_LOCATION (var),
963 FIELD_DECL, DECL_NAME (var), type);
964 DECL_ABSTRACT_ORIGIN (sfield) = var;
965 DECL_ALIGN (sfield) = DECL_ALIGN (field);
966 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
967 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
968 insert_field_into_struct (ctx->srecord_type, sfield);
969 }
970 }
971 else
972 {
973 if (ctx->srecord_type == NULL_TREE)
974 {
975 tree t;
976
977 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
978 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
979 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
980 {
981 sfield = build_decl (DECL_SOURCE_LOCATION (var),
982 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
983 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
984 insert_field_into_struct (ctx->srecord_type, sfield);
985 splay_tree_insert (ctx->sfield_map,
986 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
987 (splay_tree_value) sfield);
988 }
989 }
990 sfield = field;
991 insert_field_into_struct ((mask & 1) ? ctx->record_type
992 : ctx->srecord_type, field);
993 }
994
995 if (mask & 1)
996 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
997 (splay_tree_value) field);
998 if ((mask & 2) && ctx->sfield_map)
999 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1000 (splay_tree_value) sfield);
1001 }
1002
1003 static tree
1004 install_var_local (tree var, omp_context *ctx)
1005 {
1006 tree new_var = omp_copy_decl_1 (var, ctx);
1007 insert_decl_map (&ctx->cb, var, new_var);
1008 return new_var;
1009 }
1010
1011 /* Adjust the replacement for DECL in CTX for the new context. This means
1012 copying the DECL_VALUE_EXPR, and fixing up the type. */
1013
1014 static void
1015 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1016 {
1017 tree new_decl, size;
1018
1019 new_decl = lookup_decl (decl, ctx);
1020
1021 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1022
1023 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1024 && DECL_HAS_VALUE_EXPR_P (decl))
1025 {
1026 tree ve = DECL_VALUE_EXPR (decl);
1027 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1028 SET_DECL_VALUE_EXPR (new_decl, ve);
1029 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1030 }
1031
1032 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1033 {
1034 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1035 if (size == error_mark_node)
1036 size = TYPE_SIZE (TREE_TYPE (new_decl));
1037 DECL_SIZE (new_decl) = size;
1038
1039 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1040 if (size == error_mark_node)
1041 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1042 DECL_SIZE_UNIT (new_decl) = size;
1043 }
1044 }
1045
1046 /* The callback for remap_decl. Search all containing contexts for a
1047 mapping of the variable; this avoids having to duplicate the splay
1048 tree ahead of time. We know a mapping doesn't already exist in the
1049 given context. Create new mappings to implement default semantics. */
1050
1051 static tree
1052 omp_copy_decl (tree var, copy_body_data *cb)
1053 {
1054 omp_context *ctx = (omp_context *) cb;
1055 tree new_var;
1056
1057 if (TREE_CODE (var) == LABEL_DECL)
1058 {
1059 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1060 DECL_CONTEXT (new_var) = current_function_decl;
1061 insert_decl_map (&ctx->cb, var, new_var);
1062 return new_var;
1063 }
1064
1065 while (!is_taskreg_ctx (ctx))
1066 {
1067 ctx = ctx->outer;
1068 if (ctx == NULL)
1069 return var;
1070 new_var = maybe_lookup_decl (var, ctx);
1071 if (new_var)
1072 return new_var;
1073 }
1074
1075 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1076 return var;
1077
1078 return error_mark_node;
1079 }
1080
1081
1082 /* Return the parallel region associated with STMT. */
1083
1084 /* Debugging dumps for parallel regions. */
1085 void dump_omp_region (FILE *, struct omp_region *, int);
1086 void debug_omp_region (struct omp_region *);
1087 void debug_all_omp_regions (void);
1088
1089 /* Dump the parallel region tree rooted at REGION. */
1090
1091 void
1092 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1093 {
1094 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1095 gimple_code_name[region->type]);
1096
1097 if (region->inner)
1098 dump_omp_region (file, region->inner, indent + 4);
1099
1100 if (region->cont)
1101 {
1102 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1103 region->cont->index);
1104 }
1105
1106 if (region->exit)
1107 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1108 region->exit->index);
1109 else
1110 fprintf (file, "%*s[no exit marker]\n", indent, "");
1111
1112 if (region->next)
1113 dump_omp_region (file, region->next, indent);
1114 }
1115
1116 DEBUG_FUNCTION void
1117 debug_omp_region (struct omp_region *region)
1118 {
1119 dump_omp_region (stderr, region, 0);
1120 }
1121
1122 DEBUG_FUNCTION void
1123 debug_all_omp_regions (void)
1124 {
1125 dump_omp_region (stderr, root_omp_region, 0);
1126 }
1127
1128
1129 /* Create a new parallel region starting at basic block BB inside region PARENT. */
1130
1131 struct omp_region *
1132 new_omp_region (basic_block bb, enum gimple_code type,
1133 struct omp_region *parent)
1134 {
1135 struct omp_region *region = XCNEW (struct omp_region);
1136
1137 region->outer = parent;
1138 region->entry = bb;
1139 region->type = type;
1140
1141 if (parent)
1142 {
1143 /* This is a nested region. Add it to the list of inner
1144 regions in PARENT. */
1145 region->next = parent->inner;
1146 parent->inner = region;
1147 }
1148 else
1149 {
1150 /* This is a toplevel region. Add it to the list of toplevel
1151 regions in ROOT_OMP_REGION. */
1152 region->next = root_omp_region;
1153 root_omp_region = region;
1154 }
1155
1156 return region;
1157 }
1158
1159 /* Release the memory associated with the region tree rooted at REGION. */
1160
1161 static void
1162 free_omp_region_1 (struct omp_region *region)
1163 {
1164 struct omp_region *i, *n;
1165
1166 for (i = region->inner; i ; i = n)
1167 {
1168 n = i->next;
1169 free_omp_region_1 (i);
1170 }
1171
1172 free (region);
1173 }
1174
1175 /* Release the memory for the entire omp region tree. */
1176
1177 void
1178 free_omp_regions (void)
1179 {
1180 struct omp_region *r, *n;
1181 for (r = root_omp_region; r ; r = n)
1182 {
1183 n = r->next;
1184 free_omp_region_1 (r);
1185 }
1186 root_omp_region = NULL;
1187 }
1188
1189
1190 /* Create a new context, with OUTER_CTX being the surrounding context. */
1191
1192 static omp_context *
1193 new_omp_context (gimple stmt, omp_context *outer_ctx)
1194 {
1195 omp_context *ctx = XCNEW (omp_context);
1196
1197 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1198 (splay_tree_value) ctx);
1199 ctx->stmt = stmt;
1200
1201 if (outer_ctx)
1202 {
1203 ctx->outer = outer_ctx;
1204 ctx->cb = outer_ctx->cb;
1205 ctx->cb.block = NULL;
1206 ctx->depth = outer_ctx->depth + 1;
1207 }
1208 else
1209 {
1210 ctx->cb.src_fn = current_function_decl;
1211 ctx->cb.dst_fn = current_function_decl;
1212 ctx->cb.src_node = cgraph_get_node (current_function_decl);
1213 gcc_checking_assert (ctx->cb.src_node);
1214 ctx->cb.dst_node = ctx->cb.src_node;
1215 ctx->cb.src_cfun = cfun;
1216 ctx->cb.copy_decl = omp_copy_decl;
1217 ctx->cb.eh_lp_nr = 0;
1218 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1219 ctx->depth = 1;
1220 }
1221
1222 ctx->cb.decl_map = pointer_map_create ();
1223
1224 return ctx;
1225 }
1226
1227 static gimple_seq maybe_catch_exception (gimple_seq);
1228
1229 /* Finalize task copyfn. */
1230
1231 static void
1232 finalize_task_copyfn (gimple task_stmt)
1233 {
1234 struct function *child_cfun;
1235 tree child_fn, old_fn;
1236 gimple_seq seq, new_seq;
1237 gimple bind;
1238
1239 child_fn = gimple_omp_task_copy_fn (task_stmt);
1240 if (child_fn == NULL_TREE)
1241 return;
1242
1243 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1244
1245 /* Inform the callgraph about the new function. */
1246 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
1247 = cfun->curr_properties;
1248
1249 old_fn = current_function_decl;
1250 push_cfun (child_cfun);
1251 current_function_decl = child_fn;
1252 bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
1253 seq = gimple_seq_alloc ();
1254 gimple_seq_add_stmt (&seq, bind);
1255 new_seq = maybe_catch_exception (seq);
1256 if (new_seq != seq)
1257 {
1258 bind = gimple_build_bind (NULL, new_seq, NULL);
1259 seq = gimple_seq_alloc ();
1260 gimple_seq_add_stmt (&seq, bind);
1261 }
1262 gimple_set_body (child_fn, seq);
1263 pop_cfun ();
1264 current_function_decl = old_fn;
1265
1266 cgraph_add_new_function (child_fn, false);
1267 }
1268
1269 /* Destroy an omp_context data structure. Called through the splay tree
1270 value delete callback. */
1271
1272 static void
1273 delete_omp_context (splay_tree_value value)
1274 {
1275 omp_context *ctx = (omp_context *) value;
1276
1277 pointer_map_destroy (ctx->cb.decl_map);
1278
1279 if (ctx->field_map)
1280 splay_tree_delete (ctx->field_map);
1281 if (ctx->sfield_map)
1282 splay_tree_delete (ctx->sfield_map);
1283
1284 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1285 it produces corrupt debug information. */
1286 if (ctx->record_type)
1287 {
1288 tree t;
1289 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1290 DECL_ABSTRACT_ORIGIN (t) = NULL;
1291 }
1292 if (ctx->srecord_type)
1293 {
1294 tree t;
1295 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1296 DECL_ABSTRACT_ORIGIN (t) = NULL;
1297 }
1298
1299 if (is_task_ctx (ctx))
1300 finalize_task_copyfn (ctx->stmt);
1301
1302 XDELETE (ctx);
1303 }
1304
1305 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1306 context. */
1307
1308 static void
1309 fixup_child_record_type (omp_context *ctx)
1310 {
1311 tree f, type = ctx->record_type;
1312
1313 /* ??? It isn't sufficient to just call remap_type here, because
1314 variably_modified_type_p doesn't work the way we expect for
1315 record types. Testing each field for whether it needs remapping
1316 and creating a new record by hand works, however. */
1317 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1318 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1319 break;
1320 if (f)
1321 {
1322 tree name, new_fields = NULL;
1323
1324 type = lang_hooks.types.make_type (RECORD_TYPE);
1325 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1326 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1327 TYPE_DECL, name, type);
1328 TYPE_NAME (type) = name;
1329
1330 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1331 {
1332 tree new_f = copy_node (f);
1333 DECL_CONTEXT (new_f) = type;
1334 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1335 DECL_CHAIN (new_f) = new_fields;
1336 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1337 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1338 &ctx->cb, NULL);
1339 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1340 &ctx->cb, NULL);
1341 new_fields = new_f;
1342
1343 /* Arrange to be able to look up the receiver field
1344 given the sender field. */
1345 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1346 (splay_tree_value) new_f);
1347 }
1348 TYPE_FIELDS (type) = nreverse (new_fields);
1349 layout_type (type);
1350 }
1351
1352 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1353 }
1354
1355 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1356 specified by CLAUSES. */
1357
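/* The clause list is walked twice below: the first pass creates record
   fields and local replacement decls for each clause, the second pass
   fixes up the remapped decls and notes whether any reduction or
   lastprivate GIMPLE sequences still need to be scanned.  */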
1358 static void
1359 scan_sharing_clauses (tree clauses, omp_context *ctx)
1360 {
1361 tree c, decl;
1362 bool scan_array_reductions = false;
1363
1364 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1365 {
1366 bool by_ref;
1367
1368 switch (OMP_CLAUSE_CODE (c))
1369 {
1370 case OMP_CLAUSE_PRIVATE:
1371 decl = OMP_CLAUSE_DECL (c);
1372 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1373 goto do_private;
1374 else if (!is_variable_sized (decl))
1375 install_var_local (decl, ctx);
1376 break;
1377
1378 case OMP_CLAUSE_SHARED:
1379 gcc_assert (is_taskreg_ctx (ctx));
1380 decl = OMP_CLAUSE_DECL (c);
1381 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1382 || !is_variable_sized (decl));
1383 /* Global variables don't need to be copied,
1384 the receiver side will use them directly. */
1385 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1386 break;
1387 by_ref = use_pointer_for_field (decl, ctx);
1388 if (! TREE_READONLY (decl)
1389 || TREE_ADDRESSABLE (decl)
1390 || by_ref
1391 || is_reference (decl))
1392 {
1393 install_var_field (decl, by_ref, 3, ctx);
1394 install_var_local (decl, ctx);
1395 break;
1396 }
1397 /* We don't need to copy const scalar vars back. */
1398 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1399 goto do_private;
1400
1401 case OMP_CLAUSE_LASTPRIVATE:
1402 /* Let the corresponding firstprivate clause create
1403 the variable. */
1404 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1405 break;
1406 /* FALLTHRU */
1407
1408 case OMP_CLAUSE_FIRSTPRIVATE:
1409 case OMP_CLAUSE_REDUCTION:
1410 decl = OMP_CLAUSE_DECL (c);
1411 do_private:
1412 if (is_variable_sized (decl))
1413 {
1414 if (is_task_ctx (ctx))
1415 install_var_field (decl, false, 1, ctx);
1416 break;
1417 }
1418 else if (is_taskreg_ctx (ctx))
1419 {
1420 bool global
1421 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1422 by_ref = use_pointer_for_field (decl, NULL);
1423
1424 if (is_task_ctx (ctx)
1425 && (global || by_ref || is_reference (decl)))
1426 {
1427 install_var_field (decl, false, 1, ctx);
1428 if (!global)
1429 install_var_field (decl, by_ref, 2, ctx);
1430 }
1431 else if (!global)
1432 install_var_field (decl, by_ref, 3, ctx);
1433 }
1434 install_var_local (decl, ctx);
1435 break;
1436
1437 case OMP_CLAUSE_COPYPRIVATE:
1438 case OMP_CLAUSE_COPYIN:
1439 decl = OMP_CLAUSE_DECL (c);
1440 by_ref = use_pointer_for_field (decl, NULL);
1441 install_var_field (decl, by_ref, 3, ctx);
1442 break;
1443
1444 case OMP_CLAUSE_DEFAULT:
1445 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1446 break;
1447
1448 case OMP_CLAUSE_IF:
1449 case OMP_CLAUSE_NUM_THREADS:
1450 case OMP_CLAUSE_SCHEDULE:
1451 if (ctx->outer)
1452 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1453 break;
1454
1455 case OMP_CLAUSE_NOWAIT:
1456 case OMP_CLAUSE_ORDERED:
1457 case OMP_CLAUSE_COLLAPSE:
1458 case OMP_CLAUSE_UNTIED:
1459 break;
1460
1461 default:
1462 gcc_unreachable ();
1463 }
1464 }
1465
1466 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1467 {
1468 switch (OMP_CLAUSE_CODE (c))
1469 {
1470 case OMP_CLAUSE_LASTPRIVATE:
1471 /* Let the corresponding firstprivate clause create
1472 the variable. */
1473 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1474 scan_array_reductions = true;
1475 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1476 break;
1477 /* FALLTHRU */
1478
1479 case OMP_CLAUSE_PRIVATE:
1480 case OMP_CLAUSE_FIRSTPRIVATE:
1481 case OMP_CLAUSE_REDUCTION:
1482 decl = OMP_CLAUSE_DECL (c);
1483 if (is_variable_sized (decl))
1484 install_var_local (decl, ctx);
1485 fixup_remapped_decl (decl, ctx,
1486 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1487 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1488 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1489 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1490 scan_array_reductions = true;
1491 break;
1492
1493 case OMP_CLAUSE_SHARED:
1494 decl = OMP_CLAUSE_DECL (c);
1495 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1496 fixup_remapped_decl (decl, ctx, false);
1497 break;
1498
1499 case OMP_CLAUSE_COPYPRIVATE:
1500 case OMP_CLAUSE_COPYIN:
1501 case OMP_CLAUSE_DEFAULT:
1502 case OMP_CLAUSE_IF:
1503 case OMP_CLAUSE_NUM_THREADS:
1504 case OMP_CLAUSE_SCHEDULE:
1505 case OMP_CLAUSE_NOWAIT:
1506 case OMP_CLAUSE_ORDERED:
1507 case OMP_CLAUSE_COLLAPSE:
1508 case OMP_CLAUSE_UNTIED:
1509 break;
1510
1511 default:
1512 gcc_unreachable ();
1513 }
1514 }
1515
1516 if (scan_array_reductions)
1517 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1518 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1519 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1520 {
1521 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1522 scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1523 }
1524 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1525 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1526 scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1527 }
1528
1529 /* Create a new name for omp child function. Returns an identifier. */
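/* With clone_function_name this typically produces identifiers such as
   "foo._omp_fn.0" or "foo._omp_cpyfn.1" for an enclosing function foo;
   the trailing number comes from a global counter (example names
   only).  */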
1530
1531 static GTY(()) unsigned int tmp_ompfn_id_num;
1532
1533 static tree
1534 create_omp_child_function_name (bool task_copy)
1535 {
1536 return (clone_function_name (current_function_decl,
1537 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1538 }
1539
1540 /* Build a decl for the omp child function. It'll not contain a body
1541 yet, just the bare decl. */
1542
1543 static void
1544 create_omp_child_function (omp_context *ctx, bool task_copy)
1545 {
1546 tree decl, type, name, t;
1547
1548 name = create_omp_child_function_name (task_copy);
1549 if (task_copy)
1550 type = build_function_type_list (void_type_node, ptr_type_node,
1551 ptr_type_node, NULL_TREE);
1552 else
1553 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1554
1555 decl = build_decl (gimple_location (ctx->stmt),
1556 FUNCTION_DECL, name, type);
1557
1558 if (!task_copy)
1559 ctx->cb.dst_fn = decl;
1560 else
1561 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1562
1563 TREE_STATIC (decl) = 1;
1564 TREE_USED (decl) = 1;
1565 DECL_ARTIFICIAL (decl) = 1;
1566 DECL_NAMELESS (decl) = 1;
1567 DECL_IGNORED_P (decl) = 0;
1568 TREE_PUBLIC (decl) = 0;
1569 DECL_UNINLINABLE (decl) = 1;
1570 DECL_EXTERNAL (decl) = 0;
1571 DECL_CONTEXT (decl) = NULL_TREE;
1572 DECL_INITIAL (decl) = make_node (BLOCK);
1573
1574 t = build_decl (DECL_SOURCE_LOCATION (decl),
1575 RESULT_DECL, NULL_TREE, void_type_node);
1576 DECL_ARTIFICIAL (t) = 1;
1577 DECL_IGNORED_P (t) = 1;
1578 DECL_CONTEXT (t) = decl;
1579 DECL_RESULT (decl) = t;
1580
1581 t = build_decl (DECL_SOURCE_LOCATION (decl),
1582 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1583 DECL_ARTIFICIAL (t) = 1;
1584 DECL_NAMELESS (t) = 1;
1585 DECL_ARG_TYPE (t) = ptr_type_node;
1586 DECL_CONTEXT (t) = current_function_decl;
1587 TREE_USED (t) = 1;
1588 DECL_ARGUMENTS (decl) = t;
1589 if (!task_copy)
1590 ctx->receiver_decl = t;
1591 else
1592 {
1593 t = build_decl (DECL_SOURCE_LOCATION (decl),
1594 PARM_DECL, get_identifier (".omp_data_o"),
1595 ptr_type_node);
1596 DECL_ARTIFICIAL (t) = 1;
1597 DECL_NAMELESS (t) = 1;
1598 DECL_ARG_TYPE (t) = ptr_type_node;
1599 DECL_CONTEXT (t) = current_function_decl;
1600 TREE_USED (t) = 1;
1601 TREE_ADDRESSABLE (t) = 1;
1602 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1603 DECL_ARGUMENTS (decl) = t;
1604 }
1605
1606 /* Allocate memory for the function structure. The call to
1607 push_struct_function clobbers CFUN, so we need to restore
1608 it afterward with pop_cfun. */
1609 push_struct_function (decl);
1610 cfun->function_end_locus = gimple_location (ctx->stmt);
1611 pop_cfun ();
1612 }
1613
1614
1615 /* Scan an OpenMP parallel directive. */
1616
1617 static void
1618 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1619 {
1620 omp_context *ctx;
1621 tree name;
1622 gimple stmt = gsi_stmt (*gsi);
1623
1624 /* Ignore parallel directives with empty bodies, unless there
1625 are copyin clauses. */
1626 if (optimize > 0
1627 && empty_body_p (gimple_omp_body (stmt))
1628 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1629 OMP_CLAUSE_COPYIN) == NULL)
1630 {
1631 gsi_replace (gsi, gimple_build_nop (), false);
1632 return;
1633 }
1634
1635 ctx = new_omp_context (stmt, outer_ctx);
1636 if (taskreg_nesting_level > 1)
1637 ctx->is_nested = true;
1638 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1639 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1640 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1641 name = create_tmp_var_name (".omp_data_s");
1642 name = build_decl (gimple_location (stmt),
1643 TYPE_DECL, name, ctx->record_type);
1644 DECL_ARTIFICIAL (name) = 1;
1645 DECL_NAMELESS (name) = 1;
1646 TYPE_NAME (ctx->record_type) = name;
1647 create_omp_child_function (ctx, false);
1648 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1649
1650 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1651 scan_omp (gimple_omp_body (stmt), ctx);
1652
1653 if (TYPE_FIELDS (ctx->record_type) == NULL)
1654 ctx->record_type = ctx->receiver_decl = NULL;
1655 else
1656 {
1657 layout_type (ctx->record_type);
1658 fixup_child_record_type (ctx);
1659 }
1660 }
1661
1662 /* Scan an OpenMP task directive. */
1663
1664 static void
1665 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1666 {
1667 omp_context *ctx;
1668 tree name, t;
1669 gimple stmt = gsi_stmt (*gsi);
1670 location_t loc = gimple_location (stmt);
1671
1672 /* Ignore task directives with empty bodies. */
1673 if (optimize > 0
1674 && empty_body_p (gimple_omp_body (stmt)))
1675 {
1676 gsi_replace (gsi, gimple_build_nop (), false);
1677 return;
1678 }
1679
1680 ctx = new_omp_context (stmt, outer_ctx);
1681 if (taskreg_nesting_level > 1)
1682 ctx->is_nested = true;
1683 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1684 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1685 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1686 name = create_tmp_var_name (".omp_data_s");
1687 name = build_decl (gimple_location (stmt),
1688 TYPE_DECL, name, ctx->record_type);
1689 DECL_ARTIFICIAL (name) = 1;
1690 DECL_NAMELESS (name) = 1;
1691 TYPE_NAME (ctx->record_type) = name;
1692 create_omp_child_function (ctx, false);
1693 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1694
1695 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1696
1697 if (ctx->srecord_type)
1698 {
1699 name = create_tmp_var_name (".omp_data_a");
1700 name = build_decl (gimple_location (stmt),
1701 TYPE_DECL, name, ctx->srecord_type);
1702 DECL_ARTIFICIAL (name) = 1;
1703 DECL_NAMELESS (name) = 1;
1704 TYPE_NAME (ctx->srecord_type) = name;
1705 create_omp_child_function (ctx, true);
1706 }
1707
1708 scan_omp (gimple_omp_body (stmt), ctx);
1709
1710 if (TYPE_FIELDS (ctx->record_type) == NULL)
1711 {
1712 ctx->record_type = ctx->receiver_decl = NULL;
1713 t = build_int_cst (long_integer_type_node, 0);
1714 gimple_omp_task_set_arg_size (stmt, t);
1715 t = build_int_cst (long_integer_type_node, 1);
1716 gimple_omp_task_set_arg_align (stmt, t);
1717 }
1718 else
1719 {
1720 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1721 /* Move VLA fields to the end. */
1722 p = &TYPE_FIELDS (ctx->record_type);
1723 while (*p)
1724 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1725 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1726 {
1727 *q = *p;
1728 *p = TREE_CHAIN (*p);
1729 TREE_CHAIN (*q) = NULL_TREE;
1730 q = &TREE_CHAIN (*q);
1731 }
1732 else
1733 p = &DECL_CHAIN (*p);
1734 *p = vla_fields;
1735 layout_type (ctx->record_type);
1736 fixup_child_record_type (ctx);
1737 if (ctx->srecord_type)
1738 layout_type (ctx->srecord_type);
1739 t = fold_convert_loc (loc, long_integer_type_node,
1740 TYPE_SIZE_UNIT (ctx->record_type));
1741 gimple_omp_task_set_arg_size (stmt, t);
1742 t = build_int_cst (long_integer_type_node,
1743 TYPE_ALIGN_UNIT (ctx->record_type));
1744 gimple_omp_task_set_arg_align (stmt, t);
1745 }
1746 }
1747
1748
1749 /* Scan an OpenMP loop directive. */
1750
1751 static void
1752 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1753 {
1754 omp_context *ctx;
1755 size_t i;
1756
1757 ctx = new_omp_context (stmt, outer_ctx);
1758
1759 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1760
1761 scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1762 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1763 {
1764 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1765 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1766 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1767 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1768 }
1769 scan_omp (gimple_omp_body (stmt), ctx);
1770 }
1771
1772 /* Scan an OpenMP sections directive. */
1773
1774 static void
1775 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1776 {
1777 omp_context *ctx;
1778
1779 ctx = new_omp_context (stmt, outer_ctx);
1780 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1781 scan_omp (gimple_omp_body (stmt), ctx);
1782 }
1783
1784 /* Scan an OpenMP single directive. */
1785
1786 static void
1787 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1788 {
1789 omp_context *ctx;
1790 tree name;
1791
1792 ctx = new_omp_context (stmt, outer_ctx);
1793 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1794 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1795 name = create_tmp_var_name (".omp_copy_s");
1796 name = build_decl (gimple_location (stmt),
1797 TYPE_DECL, name, ctx->record_type);
1798 TYPE_NAME (ctx->record_type) = name;
1799
1800 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1801 scan_omp (gimple_omp_body (stmt), ctx);
1802
1803 if (TYPE_FIELDS (ctx->record_type) == NULL)
1804 ctx->record_type = NULL;
1805 else
1806 layout_type (ctx->record_type);
1807 }
1808
1809
1810 /* Check OpenMP nesting restrictions. */
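/* For example (an illustrative case, not an exhaustive list), a
   work-sharing loop closely nested inside a single region:

       #pragma omp single
       {
         #pragma omp for
         for (i = 0; i < n; i++)
           ...;
       }

   triggers the "work-sharing region may not be closely nested" warning
   below, whereas wrapping the inner loop in its own parallel region
   makes the nesting valid again.  */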
1811 static void
1812 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1813 {
1814 switch (gimple_code (stmt))
1815 {
1816 case GIMPLE_OMP_FOR:
1817 case GIMPLE_OMP_SECTIONS:
1818 case GIMPLE_OMP_SINGLE:
1819 case GIMPLE_CALL:
1820 for (; ctx != NULL; ctx = ctx->outer)
1821 switch (gimple_code (ctx->stmt))
1822 {
1823 case GIMPLE_OMP_FOR:
1824 case GIMPLE_OMP_SECTIONS:
1825 case GIMPLE_OMP_SINGLE:
1826 case GIMPLE_OMP_ORDERED:
1827 case GIMPLE_OMP_MASTER:
1828 case GIMPLE_OMP_TASK:
1829 if (is_gimple_call (stmt))
1830 {
1831 warning (0, "barrier region may not be closely nested inside "
1832 "of work-sharing, critical, ordered, master or "
1833 "explicit task region");
1834 return;
1835 }
1836 warning (0, "work-sharing region may not be closely nested inside "
1837 "of work-sharing, critical, ordered, master or explicit "
1838 "task region");
1839 return;
1840 case GIMPLE_OMP_PARALLEL:
1841 return;
1842 default:
1843 break;
1844 }
1845 break;
1846 case GIMPLE_OMP_MASTER:
1847 for (; ctx != NULL; ctx = ctx->outer)
1848 switch (gimple_code (ctx->stmt))
1849 {
1850 case GIMPLE_OMP_FOR:
1851 case GIMPLE_OMP_SECTIONS:
1852 case GIMPLE_OMP_SINGLE:
1853 case GIMPLE_OMP_TASK:
1854 warning (0, "master region may not be closely nested inside "
1855 "of work-sharing or explicit task region");
1856 return;
1857 case GIMPLE_OMP_PARALLEL:
1858 return;
1859 default:
1860 break;
1861 }
1862 break;
1863 case GIMPLE_OMP_ORDERED:
1864 for (; ctx != NULL; ctx = ctx->outer)
1865 switch (gimple_code (ctx->stmt))
1866 {
1867 case GIMPLE_OMP_CRITICAL:
1868 case GIMPLE_OMP_TASK:
1869 warning (0, "ordered region may not be closely nested inside "
1870 "of critical or explicit task region");
1871 return;
1872 case GIMPLE_OMP_FOR:
1873 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1874 OMP_CLAUSE_ORDERED) == NULL)
1875 warning (0, "ordered region must be closely nested inside "
1876 "a loop region with an ordered clause");
1877 return;
1878 case GIMPLE_OMP_PARALLEL:
1879 return;
1880 default:
1881 break;
1882 }
1883 break;
1884 case GIMPLE_OMP_CRITICAL:
1885 for (; ctx != NULL; ctx = ctx->outer)
1886 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1887 && (gimple_omp_critical_name (stmt)
1888 == gimple_omp_critical_name (ctx->stmt)))
1889 {
1890 warning (0, "critical region may not be nested inside a critical "
1891 "region with the same name");
1892 return;
1893 }
1894 break;
1895 default:
1896 break;
1897 }
1898 }
1899
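/* For illustration only (a hedged sketch, not generated code): a source
   nesting such as

     #pragma omp parallel
       #pragma omp for
         for (i = 0; i < n; i++)
           #pragma omp single        <- work-sharing inside a loop region
             f (i);

   walks the ctx->outer chain from the GIMPLE_OMP_SINGLE, meets the
   GIMPLE_OMP_FOR before any GIMPLE_OMP_PARALLEL, and therefore emits the
   "work-sharing region may not be closely nested ..." warning above.  */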
1900
1901 /* Helper function for scan_omp.
1902
1903 Callback for walk_tree or for operands in walk_gimple_stmt, used to
1904 scan for OpenMP directives in TP. */
1905
1906 static tree
1907 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1908 {
1909 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1910 omp_context *ctx = (omp_context *) wi->info;
1911 tree t = *tp;
1912
1913 switch (TREE_CODE (t))
1914 {
1915 case VAR_DECL:
1916 case PARM_DECL:
1917 case LABEL_DECL:
1918 case RESULT_DECL:
1919 if (ctx)
1920 *tp = remap_decl (t, &ctx->cb);
1921 break;
1922
1923 default:
1924 if (ctx && TYPE_P (t))
1925 *tp = remap_type (t, &ctx->cb);
1926 else if (!DECL_P (t))
1927 {
1928 *walk_subtrees = 1;
1929 if (ctx)
1930 {
1931 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1932 if (tem != TREE_TYPE (t))
1933 {
1934 if (TREE_CODE (t) == INTEGER_CST)
1935 *tp = build_int_cst_wide (tem,
1936 TREE_INT_CST_LOW (t),
1937 TREE_INT_CST_HIGH (t));
1938 else
1939 TREE_TYPE (t) = tem;
1940 }
1941 }
1942 }
1943 break;
1944 }
1945
1946 return NULL_TREE;
1947 }
1948
1949
1950 /* Helper function for scan_omp.
1951
1952 Callback for walk_gimple_stmt used to scan for OpenMP directives in
1953 the current statement in GSI. */
1954
1955 static tree
1956 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1957 struct walk_stmt_info *wi)
1958 {
1959 gimple stmt = gsi_stmt (*gsi);
1960 omp_context *ctx = (omp_context *) wi->info;
1961
1962 if (gimple_has_location (stmt))
1963 input_location = gimple_location (stmt);
1964
1965 /* Check the OpenMP nesting restrictions. */
1966 if (ctx != NULL)
1967 {
1968 if (is_gimple_omp (stmt))
1969 check_omp_nesting_restrictions (stmt, ctx);
1970 else if (is_gimple_call (stmt))
1971 {
1972 tree fndecl = gimple_call_fndecl (stmt);
1973 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1974 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1975 check_omp_nesting_restrictions (stmt, ctx);
1976 }
1977 }
1978
1979 *handled_ops_p = true;
1980
1981 switch (gimple_code (stmt))
1982 {
1983 case GIMPLE_OMP_PARALLEL:
1984 taskreg_nesting_level++;
1985 scan_omp_parallel (gsi, ctx);
1986 taskreg_nesting_level--;
1987 break;
1988
1989 case GIMPLE_OMP_TASK:
1990 taskreg_nesting_level++;
1991 scan_omp_task (gsi, ctx);
1992 taskreg_nesting_level--;
1993 break;
1994
1995 case GIMPLE_OMP_FOR:
1996 scan_omp_for (stmt, ctx);
1997 break;
1998
1999 case GIMPLE_OMP_SECTIONS:
2000 scan_omp_sections (stmt, ctx);
2001 break;
2002
2003 case GIMPLE_OMP_SINGLE:
2004 scan_omp_single (stmt, ctx);
2005 break;
2006
2007 case GIMPLE_OMP_SECTION:
2008 case GIMPLE_OMP_MASTER:
2009 case GIMPLE_OMP_ORDERED:
2010 case GIMPLE_OMP_CRITICAL:
2011 ctx = new_omp_context (stmt, ctx);
2012 scan_omp (gimple_omp_body (stmt), ctx);
2013 break;
2014
2015 case GIMPLE_BIND:
2016 {
2017 tree var;
2018
2019 *handled_ops_p = false;
2020 if (ctx)
2021 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2022 insert_decl_map (&ctx->cb, var, var);
2023 }
2024 break;
2025 default:
2026 *handled_ops_p = false;
2027 break;
2028 }
2029
2030 return NULL_TREE;
2031 }
2032
2033
2034 /* Scan all the statements in BODY. CTX
2035 contains context information about the OpenMP directives and
2036 clauses found during the scan. */
2037
2038 static void
2039 scan_omp (gimple_seq body, omp_context *ctx)
2040 {
2041 location_t saved_location;
2042 struct walk_stmt_info wi;
2043
2044 memset (&wi, 0, sizeof (wi));
2045 wi.info = ctx;
2046 wi.want_locations = true;
2047
2048 saved_location = input_location;
2049 walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2050 input_location = saved_location;
2051 }
2052 \f
2053 /* Re-gimplification and code generation routines. */
2054
2055 /* Build a call to GOMP_barrier. */
2056
2057 static tree
2058 build_omp_barrier (void)
2059 {
2060 return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
2061 }
2062
2063 /* If a context was created for STMT when it was scanned, return it. */
2064
2065 static omp_context *
2066 maybe_lookup_ctx (gimple stmt)
2067 {
2068 splay_tree_node n;
2069 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2070 return n ? (omp_context *) n->value : NULL;
2071 }
2072
2073
2074 /* Find the mapping for DECL in CTX or the immediately enclosing
2075 context that has a mapping for DECL.
2076
2077 If CTX is a nested parallel directive, we may have to use the decl
2078 mappings created in CTX's parent context. Suppose that we have the
2079 following parallel nesting (variable UIDs showed for clarity):
2080
2081 iD.1562 = 0;
2082 #omp parallel shared(iD.1562) -> outer parallel
2083 iD.1562 = iD.1562 + 1;
2084
2085 #omp parallel shared (iD.1562) -> inner parallel
2086 iD.1562 = iD.1562 - 1;
2087
2088 Each parallel structure will create a distinct .omp_data_s structure
2089 for copying iD.1562 in/out of the directive:
2090
2091 outer parallel .omp_data_s.1.i -> iD.1562
2092 inner parallel .omp_data_s.2.i -> iD.1562
2093
2094 A shared variable mapping will produce a copy-out operation before
2095 the parallel directive and a copy-in operation after it. So, in
2096 this case we would have:
2097
2098 iD.1562 = 0;
2099 .omp_data_o.1.i = iD.1562;
2100 #omp parallel shared(iD.1562) -> outer parallel
2101 .omp_data_i.1 = &.omp_data_o.1
2102 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2103
2104 .omp_data_o.2.i = iD.1562; -> **
2105 #omp parallel shared(iD.1562) -> inner parallel
2106 .omp_data_i.2 = &.omp_data_o.2
2107 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2108
2109
2110 ** This is a problem. The symbol iD.1562 cannot be referenced
2111 inside the body of the outer parallel region. But since we are
2112 emitting this copy operation while expanding the inner parallel
2113 directive, we need to access the CTX structure of the outer
2114 parallel directive to get the correct mapping:
2115
2116 .omp_data_o.2.i = .omp_data_i.1->i
2117
2118 Since there may be other workshare or parallel directives enclosing
2119 the parallel directive, it may be necessary to walk up the context
2120 parent chain. This is not a problem in general because nested
2121 parallelism happens only rarely. */
2122
2123 static tree
2124 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2125 {
2126 tree t;
2127 omp_context *up;
2128
2129 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2130 t = maybe_lookup_decl (decl, up);
2131
2132 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2133
2134 return t ? t : decl;
2135 }
2136
2137
2138 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2139 in outer contexts. */
2140
2141 static tree
2142 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2143 {
2144 tree t = NULL;
2145 omp_context *up;
2146
2147 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2148 t = maybe_lookup_decl (decl, up);
2149
2150 return t ? t : decl;
2151 }
2152
2153
2154 /* Construct the initialization value for reduction CLAUSE. */
2155
2156 tree
2157 omp_reduction_init (tree clause, tree type)
2158 {
2159 location_t loc = OMP_CLAUSE_LOCATION (clause);
2160 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2161 {
2162 case PLUS_EXPR:
2163 case MINUS_EXPR:
2164 case BIT_IOR_EXPR:
2165 case BIT_XOR_EXPR:
2166 case TRUTH_OR_EXPR:
2167 case TRUTH_ORIF_EXPR:
2168 case TRUTH_XOR_EXPR:
2169 case NE_EXPR:
2170 return build_zero_cst (type);
2171
2172 case MULT_EXPR:
2173 case TRUTH_AND_EXPR:
2174 case TRUTH_ANDIF_EXPR:
2175 case EQ_EXPR:
2176 return fold_convert_loc (loc, type, integer_one_node);
2177
2178 case BIT_AND_EXPR:
2179 return fold_convert_loc (loc, type, integer_minus_one_node);
2180
2181 case MAX_EXPR:
2182 if (SCALAR_FLOAT_TYPE_P (type))
2183 {
2184 REAL_VALUE_TYPE max, min;
2185 if (HONOR_INFINITIES (TYPE_MODE (type)))
2186 {
2187 real_inf (&max);
2188 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2189 }
2190 else
2191 real_maxval (&min, 1, TYPE_MODE (type));
2192 return build_real (type, min);
2193 }
2194 else
2195 {
2196 gcc_assert (INTEGRAL_TYPE_P (type));
2197 return TYPE_MIN_VALUE (type);
2198 }
2199
2200 case MIN_EXPR:
2201 if (SCALAR_FLOAT_TYPE_P (type))
2202 {
2203 REAL_VALUE_TYPE max;
2204 if (HONOR_INFINITIES (TYPE_MODE (type)))
2205 real_inf (&max);
2206 else
2207 real_maxval (&max, 0, TYPE_MODE (type));
2208 return build_real (type, max);
2209 }
2210 else
2211 {
2212 gcc_assert (INTEGRAL_TYPE_P (type));
2213 return TYPE_MAX_VALUE (type);
2214 }
2215
2216 default:
2217 gcc_unreachable ();
2218 }
2219 }
2220
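/* Illustrative only (a sketch, not authoritative): the identity values
   chosen above give e.g.

     reduction (+:x)    ->  private x initialized to 0
     reduction (*:x)    ->  private x initialized to 1
     reduction (&:x)    ->  private x initialized to ~0 (all bits set)
     reduction (max:x)  ->  private x initialized to the minimum of x's
                            type (or -inf for floats honoring infinities)

   so that combining the per-thread partial results with the reduction
   operator leaves the original value unaffected.  */
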
2221 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2222 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2223 private variables. Initialization statements go in ILIST, while calls
2224 to destructors go in DLIST. */
2225
2226 static void
2227 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2228 omp_context *ctx)
2229 {
2230 gimple_stmt_iterator diter;
2231 tree c, dtor, copyin_seq, x, ptr;
2232 bool copyin_by_ref = false;
2233 bool lastprivate_firstprivate = false;
2234 int pass;
2235
2236 *dlist = gimple_seq_alloc ();
2237 diter = gsi_start (*dlist);
2238 copyin_seq = NULL;
2239
2240 /* Do all the fixed sized types in the first pass, and the variable sized
2241 types in the second pass. This makes sure that the scalar arguments to
2242 the variable sized types are processed before we use them in the
2243 variable sized operations. */
2244 for (pass = 0; pass < 2; ++pass)
2245 {
2246 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2247 {
2248 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2249 tree var, new_var;
2250 bool by_ref;
2251 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2252
2253 switch (c_kind)
2254 {
2255 case OMP_CLAUSE_PRIVATE:
2256 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2257 continue;
2258 break;
2259 case OMP_CLAUSE_SHARED:
2260 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2261 {
2262 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2263 continue;
2264 }
2265 case OMP_CLAUSE_FIRSTPRIVATE:
2266 case OMP_CLAUSE_COPYIN:
2267 case OMP_CLAUSE_REDUCTION:
2268 break;
2269 case OMP_CLAUSE_LASTPRIVATE:
2270 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2271 {
2272 lastprivate_firstprivate = true;
2273 if (pass != 0)
2274 continue;
2275 }
2276 break;
2277 default:
2278 continue;
2279 }
2280
2281 new_var = var = OMP_CLAUSE_DECL (c);
2282 if (c_kind != OMP_CLAUSE_COPYIN)
2283 new_var = lookup_decl (var, ctx);
2284
2285 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2286 {
2287 if (pass != 0)
2288 continue;
2289 }
2290 else if (is_variable_sized (var))
2291 {
2292 /* For variable sized types, we need to allocate the
2293 actual storage here. Call alloca and store the
2294 result in the pointer decl that we created elsewhere. */
2295 if (pass == 0)
2296 continue;
2297
2298 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2299 {
2300 gimple stmt;
2301 tree tmp;
2302
2303 ptr = DECL_VALUE_EXPR (new_var);
2304 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2305 ptr = TREE_OPERAND (ptr, 0);
2306 gcc_assert (DECL_P (ptr));
2307 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2308
2309 /* void *tmp = __builtin_alloca (x); */
2310 stmt
2311 = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
2312 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2313 gimple_add_tmp_var (tmp);
2314 gimple_call_set_lhs (stmt, tmp);
2315
2316 gimple_seq_add_stmt (ilist, stmt);
2317
2318 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2319 gimplify_assign (ptr, x, ilist);
2320 }
2321 }
2322 else if (is_reference (var))
2323 {
2324 /* For references that are being privatized for Fortran,
2325 allocate new backing storage for the new pointer
2326 variable. This allows us to avoid changing all the
2327 code that expects a pointer into code that expects
2328 a direct variable. Note that this doesn't apply to
2329 C++, since reference types are disallowed in data
2330 sharing clauses there, except for NRV optimized
2331 return values. */
2332 if (pass == 0)
2333 continue;
2334
2335 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2336 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2337 {
2338 x = build_receiver_ref (var, false, ctx);
2339 x = build_fold_addr_expr_loc (clause_loc, x);
2340 }
2341 else if (TREE_CONSTANT (x))
2342 {
2343 const char *name = NULL;
2344 if (DECL_NAME (var))
2345 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2346
2347 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2348 name);
2349 gimple_add_tmp_var (x);
2350 TREE_ADDRESSABLE (x) = 1;
2351 x = build_fold_addr_expr_loc (clause_loc, x);
2352 }
2353 else
2354 {
2355 x = build_call_expr_loc (clause_loc,
2356 built_in_decls[BUILT_IN_ALLOCA], 1, x);
2357 }
2358
2359 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2360 gimplify_assign (new_var, x, ilist);
2361
2362 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2363 }
2364 else if (c_kind == OMP_CLAUSE_REDUCTION
2365 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2366 {
2367 if (pass == 0)
2368 continue;
2369 }
2370 else if (pass != 0)
2371 continue;
2372
2373 switch (OMP_CLAUSE_CODE (c))
2374 {
2375 case OMP_CLAUSE_SHARED:
2376 /* Shared global vars are just accessed directly. */
2377 if (is_global_var (new_var))
2378 break;
2379 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2380 needs to be delayed until after fixup_child_record_type so
2381 that we get the correct type during the dereference. */
2382 by_ref = use_pointer_for_field (var, ctx);
2383 x = build_receiver_ref (var, by_ref, ctx);
2384 SET_DECL_VALUE_EXPR (new_var, x);
2385 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2386
2387 /* ??? If VAR is not passed by reference, and the variable
2388 hasn't been initialized yet, then we'll get a warning for
2389 the store into the omp_data_s structure. Ideally, we'd be
2390 able to notice this and not store anything at all, but
2391 we're generating code too early. Suppress the warning. */
2392 if (!by_ref)
2393 TREE_NO_WARNING (var) = 1;
2394 break;
2395
2396 case OMP_CLAUSE_LASTPRIVATE:
2397 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2398 break;
2399 /* FALLTHRU */
2400
2401 case OMP_CLAUSE_PRIVATE:
2402 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2403 x = build_outer_var_ref (var, ctx);
2404 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2405 {
2406 if (is_task_ctx (ctx))
2407 x = build_receiver_ref (var, false, ctx);
2408 else
2409 x = build_outer_var_ref (var, ctx);
2410 }
2411 else
2412 x = NULL;
2413 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2414 if (x)
2415 gimplify_and_add (x, ilist);
2416 /* FALLTHRU */
2417
2418 do_dtor:
2419 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2420 if (x)
2421 {
2422 gimple_seq tseq = NULL;
2423
2424 dtor = x;
2425 gimplify_stmt (&dtor, &tseq);
2426 gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2427 }
2428 break;
2429
2430 case OMP_CLAUSE_FIRSTPRIVATE:
2431 if (is_task_ctx (ctx))
2432 {
2433 if (is_reference (var) || is_variable_sized (var))
2434 goto do_dtor;
2435 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2436 ctx))
2437 || use_pointer_for_field (var, NULL))
2438 {
2439 x = build_receiver_ref (var, false, ctx);
2440 SET_DECL_VALUE_EXPR (new_var, x);
2441 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2442 goto do_dtor;
2443 }
2444 }
2445 x = build_outer_var_ref (var, ctx);
2446 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2447 gimplify_and_add (x, ilist);
2448 goto do_dtor;
2449 break;
2450
2451 case OMP_CLAUSE_COPYIN:
2452 by_ref = use_pointer_for_field (var, NULL);
2453 x = build_receiver_ref (var, by_ref, ctx);
2454 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2455 append_to_statement_list (x, &copyin_seq);
2456 copyin_by_ref |= by_ref;
2457 break;
2458
2459 case OMP_CLAUSE_REDUCTION:
2460 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2461 {
2462 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2463 x = build_outer_var_ref (var, ctx);
2464
2465 if (is_reference (var))
2466 x = build_fold_addr_expr_loc (clause_loc, x);
2467 SET_DECL_VALUE_EXPR (placeholder, x);
2468 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2469 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2470 gimple_seq_add_seq (ilist,
2471 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2472 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2473 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2474 }
2475 else
2476 {
2477 x = omp_reduction_init (c, TREE_TYPE (new_var));
2478 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2479 gimplify_assign (new_var, x, ilist);
2480 }
2481 break;
2482
2483 default:
2484 gcc_unreachable ();
2485 }
2486 }
2487 }
2488
2489 /* The copyin sequence is not to be executed by the main thread, since
2490 that would result in self-copies. That may not be visible for scalars,
2491 but it certainly is for C++ operator=. */
2492 if (copyin_seq)
2493 {
2494 x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
2495 x = build2 (NE_EXPR, boolean_type_node, x,
2496 build_int_cst (TREE_TYPE (x), 0));
2497 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2498 gimplify_and_add (x, ilist);
2499 }
2500
2501 /* If any copyin variable is passed by reference, we must ensure the
2502 master thread doesn't modify it before it is copied over in all
2503 threads. Similarly, for variables in both firstprivate and
2504 lastprivate clauses, we need to ensure the lastprivate copying
2505 happens after firstprivate copying in all threads. */
2506 if (copyin_by_ref || lastprivate_firstprivate)
2507 gimplify_and_add (build_omp_barrier (), ilist);
2508 }
2509
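/* A rough, hedged sketch of the receiver-side code built above (temporary
   names and by-reference handling vary): for

     #pragma omp parallel firstprivate (a) copyin (t)

   ILIST receives approximately

     a.1 = .omp_data_i->a;                   <- firstprivate copy-ctor/assign
     if (__builtin_omp_get_thread_num () != 0)
       t = *.omp_data_i->t;                  <- copyin, skipped in the master

   and DLIST receives any destructor calls produced by
   lang_hooks.decls.omp_clause_dtor.  */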
2510
2511 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2512 both parallel and workshare constructs. PREDICATE may be NULL if it's
2513 always true. */
2514
2515 static void
2516 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2517 omp_context *ctx)
2518 {
2519 tree x, c, label = NULL;
2520 bool par_clauses = false;
2521
2522 /* Early exit if there are no lastprivate clauses. */
2523 clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2524 if (clauses == NULL)
2525 {
2526 /* If this was a workshare construct, see if it had been combined
2527 with its parallel. In that case, look for the clauses on the
2528 parallel statement itself. */
2529 if (is_parallel_ctx (ctx))
2530 return;
2531
2532 ctx = ctx->outer;
2533 if (ctx == NULL || !is_parallel_ctx (ctx))
2534 return;
2535
2536 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2537 OMP_CLAUSE_LASTPRIVATE);
2538 if (clauses == NULL)
2539 return;
2540 par_clauses = true;
2541 }
2542
2543 if (predicate)
2544 {
2545 gimple stmt;
2546 tree label_true, arm1, arm2;
2547
2548 label = create_artificial_label (UNKNOWN_LOCATION);
2549 label_true = create_artificial_label (UNKNOWN_LOCATION);
2550 arm1 = TREE_OPERAND (predicate, 0);
2551 arm2 = TREE_OPERAND (predicate, 1);
2552 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2553 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2554 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2555 label_true, label);
2556 gimple_seq_add_stmt (stmt_list, stmt);
2557 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2558 }
2559
2560 for (c = clauses; c ;)
2561 {
2562 tree var, new_var;
2563 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2564
2565 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2566 {
2567 var = OMP_CLAUSE_DECL (c);
2568 new_var = lookup_decl (var, ctx);
2569
2570 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2571 {
2572 lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2573 gimple_seq_add_seq (stmt_list,
2574 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2575 }
2576 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2577
2578 x = build_outer_var_ref (var, ctx);
2579 if (is_reference (var))
2580 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2581 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2582 gimplify_and_add (x, stmt_list);
2583 }
2584 c = OMP_CLAUSE_CHAIN (c);
2585 if (c == NULL && !par_clauses)
2586 {
2587 /* If this was a workshare construct, see if it had been combined
2588 with its parallel. In that case, continue looking for the
2589 clauses also on the parallel statement itself. */
2590 if (is_parallel_ctx (ctx))
2591 break;
2592
2593 ctx = ctx->outer;
2594 if (ctx == NULL || !is_parallel_ctx (ctx))
2595 break;
2596
2597 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2598 OMP_CLAUSE_LASTPRIVATE);
2599 par_clauses = true;
2600 }
2601 }
2602
2603 if (label)
2604 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2605 }
2606
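/* For illustration (approximate, not verbatim): with a PREDICATE such as
   "V == LAST", the sequence emitted above has the shape

     if (V == LAST) goto label_true; else goto label;
     label_true:
       <outer copy of x> = <private x>;      <- one assignment per clause
     label:

   i.e. only the thread that executed the sequentially last iteration
   copies its private values back out.  */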
2607
2608 /* Generate code to implement the REDUCTION clauses. */
2609
2610 static void
2611 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2612 {
2613 gimple_seq sub_seq = NULL;
2614 gimple stmt;
2615 tree x, c;
2616 int count = 0;
2617
2618 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
2619 update in that case, otherwise use a lock. */
2620 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2621 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2622 {
2623 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2624 {
2625 /* Never use OMP_ATOMIC for array reductions. */
2626 count = -1;
2627 break;
2628 }
2629 count++;
2630 }
2631
2632 if (count == 0)
2633 return;
2634
2635 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2636 {
2637 tree var, ref, new_var;
2638 enum tree_code code;
2639 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2640
2641 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2642 continue;
2643
2644 var = OMP_CLAUSE_DECL (c);
2645 new_var = lookup_decl (var, ctx);
2646 if (is_reference (var))
2647 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2648 ref = build_outer_var_ref (var, ctx);
2649 code = OMP_CLAUSE_REDUCTION_CODE (c);
2650
2651 /* reduction(-:var) sums up the partial results, so it acts
2652 identically to reduction(+:var). */
2653 if (code == MINUS_EXPR)
2654 code = PLUS_EXPR;
2655
2656 if (count == 1)
2657 {
2658 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2659
2660 addr = save_expr (addr);
2661 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2662 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2663 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2664 gimplify_and_add (x, stmt_seqp);
2665 return;
2666 }
2667
2668 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2669 {
2670 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2671
2672 if (is_reference (var))
2673 ref = build_fold_addr_expr_loc (clause_loc, ref);
2674 SET_DECL_VALUE_EXPR (placeholder, ref);
2675 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2676 lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2677 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2678 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2679 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2680 }
2681 else
2682 {
2683 x = build2 (code, TREE_TYPE (ref), ref, new_var);
2684 ref = build_outer_var_ref (var, ctx);
2685 gimplify_assign (ref, x, &sub_seq);
2686 }
2687 }
2688
2689 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
2690 gimple_seq_add_stmt (stmt_seqp, stmt);
2691
2692 gimple_seq_add_seq (stmt_seqp, sub_seq);
2693
2694 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
2695 gimple_seq_add_stmt (stmt_seqp, stmt);
2696 }
2697
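/* A hedged sketch of the two strategies above: with a single scalar
   reduction (+:s) the merge becomes one OMP_ATOMIC update, roughly

     *&<outer s> = *&<outer s> + <private s>;   <- atomic

   whereas with two or more reduction clauses (or a placeholder/array
   reduction) the merges are bracketed by a lock:

     GOMP_atomic_start ();
     <outer s> = <outer s> + <private s>;
     <outer p> = <outer p> * <private p>;
     GOMP_atomic_end ();  */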
2698
2699 /* Generate code to implement the COPYPRIVATE clauses. */
2700
2701 static void
2702 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2703 omp_context *ctx)
2704 {
2705 tree c;
2706
2707 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2708 {
2709 tree var, new_var, ref, x;
2710 bool by_ref;
2711 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2712
2713 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2714 continue;
2715
2716 var = OMP_CLAUSE_DECL (c);
2717 by_ref = use_pointer_for_field (var, NULL);
2718
2719 ref = build_sender_ref (var, ctx);
2720 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2721 if (by_ref)
2722 {
2723 x = build_fold_addr_expr_loc (clause_loc, new_var);
2724 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2725 }
2726 gimplify_assign (ref, x, slist);
2727
2728 ref = build_receiver_ref (var, false, ctx);
2729 if (by_ref)
2730 {
2731 ref = fold_convert_loc (clause_loc,
2732 build_pointer_type (TREE_TYPE (new_var)),
2733 ref);
2734 ref = build_fold_indirect_ref_loc (clause_loc, ref);
2735 }
2736 if (is_reference (var))
2737 {
2738 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2739 ref = build_simple_mem_ref_loc (clause_loc, ref);
2740 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2741 }
2742 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2743 gimplify_and_add (x, rlist);
2744 }
2745 }
2746
2747
2748 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
2749 and REDUCTION from the sender (aka parent) side. */
2750
2751 static void
2752 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2753 omp_context *ctx)
2754 {
2755 tree c;
2756
2757 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2758 {
2759 tree val, ref, x, var;
2760 bool by_ref, do_in = false, do_out = false;
2761 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2762
2763 switch (OMP_CLAUSE_CODE (c))
2764 {
2765 case OMP_CLAUSE_PRIVATE:
2766 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2767 break;
2768 continue;
2769 case OMP_CLAUSE_FIRSTPRIVATE:
2770 case OMP_CLAUSE_COPYIN:
2771 case OMP_CLAUSE_LASTPRIVATE:
2772 case OMP_CLAUSE_REDUCTION:
2773 break;
2774 default:
2775 continue;
2776 }
2777
2778 val = OMP_CLAUSE_DECL (c);
2779 var = lookup_decl_in_outer_ctx (val, ctx);
2780
2781 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2782 && is_global_var (var))
2783 continue;
2784 if (is_variable_sized (val))
2785 continue;
2786 by_ref = use_pointer_for_field (val, NULL);
2787
2788 switch (OMP_CLAUSE_CODE (c))
2789 {
2790 case OMP_CLAUSE_PRIVATE:
2791 case OMP_CLAUSE_FIRSTPRIVATE:
2792 case OMP_CLAUSE_COPYIN:
2793 do_in = true;
2794 break;
2795
2796 case OMP_CLAUSE_LASTPRIVATE:
2797 if (by_ref || is_reference (val))
2798 {
2799 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2800 continue;
2801 do_in = true;
2802 }
2803 else
2804 {
2805 do_out = true;
2806 if (lang_hooks.decls.omp_private_outer_ref (val))
2807 do_in = true;
2808 }
2809 break;
2810
2811 case OMP_CLAUSE_REDUCTION:
2812 do_in = true;
2813 do_out = !(by_ref || is_reference (val));
2814 break;
2815
2816 default:
2817 gcc_unreachable ();
2818 }
2819
2820 if (do_in)
2821 {
2822 ref = build_sender_ref (val, ctx);
2823 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2824 gimplify_assign (ref, x, ilist);
2825 if (is_task_ctx (ctx))
2826 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2827 }
2828
2829 if (do_out)
2830 {
2831 ref = build_sender_ref (val, ctx);
2832 gimplify_assign (var, ref, olist);
2833 }
2834 }
2835 }
2836
2837 /* Generate code to implement SHARED from the sender (aka parent)
2838 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2839 list things that got automatically shared. */
2840
2841 static void
2842 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2843 {
2844 tree var, ovar, nvar, f, x, record_type;
2845
2846 if (ctx->record_type == NULL)
2847 return;
2848
2849 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2850 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2851 {
2852 ovar = DECL_ABSTRACT_ORIGIN (f);
2853 nvar = maybe_lookup_decl (ovar, ctx);
2854 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2855 continue;
2856
2857 /* If CTX is a nested parallel directive, find the immediately
2858 enclosing parallel or workshare construct that contains a
2859 mapping for OVAR. */
2860 var = lookup_decl_in_outer_ctx (ovar, ctx);
2861
2862 if (use_pointer_for_field (ovar, ctx))
2863 {
2864 x = build_sender_ref (ovar, ctx);
2865 var = build_fold_addr_expr (var);
2866 gimplify_assign (x, var, ilist);
2867 }
2868 else
2869 {
2870 x = build_sender_ref (ovar, ctx);
2871 gimplify_assign (x, var, ilist);
2872
2873 if (!TREE_READONLY (var)
2874 /* We don't need to receive a new reference to a result
2875 or parm decl. In fact we may not store to it, as we would
2876 invalidate any pending return slot optimization (RSO) and
2877 generate wrong gimple during inlining. */
2878 && !((TREE_CODE (var) == RESULT_DECL
2879 || TREE_CODE (var) == PARM_DECL)
2880 && DECL_BY_REFERENCE (var)))
2881 {
2882 x = build_sender_ref (ovar, ctx);
2883 gimplify_assign (var, x, olist);
2884 }
2885 }
2886 }
2887 }
2888
2889
2890 /* A convenience function to build an empty GIMPLE_COND with just the
2891 condition. */
2892
2893 static gimple
2894 gimple_build_cond_empty (tree cond)
2895 {
2896 enum tree_code pred_code;
2897 tree lhs, rhs;
2898
2899 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2900 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2901 }
2902
2903
2904 /* Build the function calls to GOMP_parallel_start etc to actually
2905 generate the parallel operation. REGION is the parallel region
2906 being expanded. BB is the block where to insert the code. WS_ARGS
2907 will be set if this is a call to a combined parallel+workshare
2908 construct, it contains the list of additional arguments needed by
2909 the workshare construct. */
2910
2911 static void
2912 expand_parallel_call (struct omp_region *region, basic_block bb,
2913 gimple entry_stmt, VEC(tree,gc) *ws_args)
2914 {
2915 tree t, t1, t2, val, cond, c, clauses;
2916 gimple_stmt_iterator gsi;
2917 gimple stmt;
2918 int start_ix;
2919 location_t clause_loc;
2920 VEC(tree,gc) *args;
2921
2922 clauses = gimple_omp_parallel_clauses (entry_stmt);
2923
2924 /* Determine what flavor of GOMP_parallel_start we will be
2925 emitting. */
2926 start_ix = BUILT_IN_GOMP_PARALLEL_START;
2927 if (is_combined_parallel (region))
2928 {
2929 switch (region->inner->type)
2930 {
2931 case GIMPLE_OMP_FOR:
2932 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2933 start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2934 + (region->inner->sched_kind
2935 == OMP_CLAUSE_SCHEDULE_RUNTIME
2936 ? 3 : region->inner->sched_kind);
2937 break;
2938 case GIMPLE_OMP_SECTIONS:
2939 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2940 break;
2941 default:
2942 gcc_unreachable ();
2943 }
2944 }
2945
2946 /* By default, the value of NUM_THREADS is zero (selected at run time)
2947 and there is no conditional. */
2948 cond = NULL_TREE;
2949 val = build_int_cst (unsigned_type_node, 0);
2950
2951 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2952 if (c)
2953 cond = OMP_CLAUSE_IF_EXPR (c);
2954
2955 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2956 if (c)
2957 {
2958 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2959 clause_loc = OMP_CLAUSE_LOCATION (c);
2960 }
2961 else
2962 clause_loc = gimple_location (entry_stmt);
2963
2964 /* Ensure 'val' is of the correct type. */
2965 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2966
2967 /* If we found the clause 'if (cond)', build either
2968 (cond != 0) or (cond ? val : 1u). */
2969 if (cond)
2970 {
2971 gimple_stmt_iterator gsi;
2972
2973 cond = gimple_boolify (cond);
2974
2975 if (integer_zerop (val))
2976 val = fold_build2_loc (clause_loc,
2977 EQ_EXPR, unsigned_type_node, cond,
2978 build_int_cst (TREE_TYPE (cond), 0));
2979 else
2980 {
2981 basic_block cond_bb, then_bb, else_bb;
2982 edge e, e_then, e_else;
2983 tree tmp_then, tmp_else, tmp_join, tmp_var;
2984
2985 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2986 if (gimple_in_ssa_p (cfun))
2987 {
2988 tmp_then = make_ssa_name (tmp_var, NULL);
2989 tmp_else = make_ssa_name (tmp_var, NULL);
2990 tmp_join = make_ssa_name (tmp_var, NULL);
2991 }
2992 else
2993 {
2994 tmp_then = tmp_var;
2995 tmp_else = tmp_var;
2996 tmp_join = tmp_var;
2997 }
2998
2999 e = split_block (bb, NULL);
3000 cond_bb = e->src;
3001 bb = e->dest;
3002 remove_edge (e);
3003
3004 then_bb = create_empty_bb (cond_bb);
3005 else_bb = create_empty_bb (then_bb);
3006 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3007 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3008
3009 stmt = gimple_build_cond_empty (cond);
3010 gsi = gsi_start_bb (cond_bb);
3011 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3012
3013 gsi = gsi_start_bb (then_bb);
3014 stmt = gimple_build_assign (tmp_then, val);
3015 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3016
3017 gsi = gsi_start_bb (else_bb);
3018 stmt = gimple_build_assign
3019 (tmp_else, build_int_cst (unsigned_type_node, 1));
3020 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3021
3022 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3023 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3024 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3025 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3026
3027 if (gimple_in_ssa_p (cfun))
3028 {
3029 gimple phi = create_phi_node (tmp_join, bb);
3030 SSA_NAME_DEF_STMT (tmp_join) = phi;
3031 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3032 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3033 }
3034
3035 val = tmp_join;
3036 }
3037
3038 gsi = gsi_start_bb (bb);
3039 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3040 false, GSI_CONTINUE_LINKING);
3041 }
3042
3043 gsi = gsi_last_bb (bb);
3044 t = gimple_omp_parallel_data_arg (entry_stmt);
3045 if (t == NULL)
3046 t1 = null_pointer_node;
3047 else
3048 t1 = build_fold_addr_expr (t);
3049 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3050
3051 args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
3052 VEC_quick_push (tree, args, t2);
3053 VEC_quick_push (tree, args, t1);
3054 VEC_quick_push (tree, args, val);
3055 VEC_splice (tree, args, ws_args);
3056
3057 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3058 built_in_decls[start_ix], args);
3059
3060 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3061 false, GSI_CONTINUE_LINKING);
3062
3063 t = gimple_omp_parallel_data_arg (entry_stmt);
3064 if (t == NULL)
3065 t = null_pointer_node;
3066 else
3067 t = build_fold_addr_expr (t);
3068 t = build_call_expr_loc (gimple_location (entry_stmt),
3069 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3070 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3071 false, GSI_CONTINUE_LINKING);
3072
3073 t = build_call_expr_loc (gimple_location (entry_stmt),
3074 built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
3075 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3076 false, GSI_CONTINUE_LINKING);
3077 }
3078
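/* Illustrative shape of the expansion above (a sketch; the argument list
   and the exact start entry point depend on the clauses and on whether
   this is a combined parallel+workshare region):

     GOMP_parallel_start (child_fn, &.omp_data_o, nthreads [, ws args]);
     child_fn (&.omp_data_o);
     GOMP_parallel_end ();

   with NTHREADS being 0 when no num_threads clause was given, and the
   "if (cond)" clause folded into NTHREADS as (cond ? val : 1).  */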
3079
3080 /* Build the function call to GOMP_task to actually
3081 generate the task operation. BB is the block where the code is to be inserted. */
3082
3083 static void
3084 expand_task_call (basic_block bb, gimple entry_stmt)
3085 {
3086 tree t, t1, t2, t3, flags, cond, c, clauses;
3087 gimple_stmt_iterator gsi;
3088 location_t loc = gimple_location (entry_stmt);
3089
3090 clauses = gimple_omp_task_clauses (entry_stmt);
3091
3092 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3093 if (c)
3094 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3095 else
3096 cond = boolean_true_node;
3097
3098 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3099 flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));
3100
3101 gsi = gsi_last_bb (bb);
3102 t = gimple_omp_task_data_arg (entry_stmt);
3103 if (t == NULL)
3104 t2 = null_pointer_node;
3105 else
3106 t2 = build_fold_addr_expr_loc (loc, t);
3107 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3108 t = gimple_omp_task_copy_fn (entry_stmt);
3109 if (t == NULL)
3110 t3 = null_pointer_node;
3111 else
3112 t3 = build_fold_addr_expr_loc (loc, t);
3113
3114 t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
3115 gimple_omp_task_arg_size (entry_stmt),
3116 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3117
3118 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3119 false, GSI_CONTINUE_LINKING);
3120 }
3121
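/* Roughly (a sketch of the call built above, argument order as in the
   7-argument GOMP_task builtin used here):

     GOMP_task (child_fn, &.omp_data_o, copy_fn_or_NULL,
                arg_size, arg_align, if_cond, untied_flag);

   where IF_COND defaults to true and UNTIED_FLAG is 1 only when an
   untied clause was present.  */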
3122
3123 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3124 catch handler and return it. This prevents programs from violating the
3125 structured block semantics with throws. */
3126
3127 static gimple_seq
3128 maybe_catch_exception (gimple_seq body)
3129 {
3130 gimple g;
3131 tree decl;
3132
3133 if (!flag_exceptions)
3134 return body;
3135
3136 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3137 decl = lang_hooks.eh_protect_cleanup_actions ();
3138 else
3139 decl = built_in_decls[BUILT_IN_TRAP];
3140
3141 g = gimple_build_eh_must_not_throw (decl);
3142 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3143 GIMPLE_TRY_CATCH);
3144
3145 return gimple_seq_alloc_with_stmt (g);
3146 }
3147
3148 /* Chain all the DECLs in V together by their DECL_CHAIN fields. */
3149
3150 static tree
3151 vec2chain (VEC(tree,gc) *v)
3152 {
3153 tree chain = NULL_TREE, t;
3154 unsigned ix;
3155
3156 FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
3157 {
3158 DECL_CHAIN (t) = chain;
3159 chain = t;
3160 }
3161
3162 return chain;
3163 }
3164
3165
3166 /* Remove barriers in REGION->EXIT's block. Note that this is only
3167 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3168 is an implicit barrier, any barrier that a workshare inside the
3169 GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL region
3170 can now be removed. */
3171
3172 static void
3173 remove_exit_barrier (struct omp_region *region)
3174 {
3175 gimple_stmt_iterator gsi;
3176 basic_block exit_bb;
3177 edge_iterator ei;
3178 edge e;
3179 gimple stmt;
3180 int any_addressable_vars = -1;
3181
3182 exit_bb = region->exit;
3183
3184 /* If the parallel region doesn't return, we don't have a REGION->EXIT
3185 block at all. */
3186 if (! exit_bb)
3187 return;
3188
3189 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3190 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3191 statements that can appear in between are extremely limited -- no
3192 memory operations at all. Here, we allow nothing at all, so the
3193 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3194 gsi = gsi_last_bb (exit_bb);
3195 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3196 gsi_prev (&gsi);
3197 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3198 return;
3199
3200 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3201 {
3202 gsi = gsi_last_bb (e->src);
3203 if (gsi_end_p (gsi))
3204 continue;
3205 stmt = gsi_stmt (gsi);
3206 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3207 && !gimple_omp_return_nowait_p (stmt))
3208 {
3209 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3210 in many cases. If there could be tasks queued, the barrier
3211 might be needed to let the tasks run before some local
3212 variable of the parallel that the task uses as shared
3213 runs out of scope. The task can be spawned either
3214 from within the current function (this would be easy to check)
3215 or from some function it calls that gets passed the address
3216 of such a variable. */
3217 if (any_addressable_vars < 0)
3218 {
3219 gimple parallel_stmt = last_stmt (region->entry);
3220 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3221 tree local_decls, block, decl;
3222 unsigned ix;
3223
3224 any_addressable_vars = 0;
3225 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3226 if (TREE_ADDRESSABLE (decl))
3227 {
3228 any_addressable_vars = 1;
3229 break;
3230 }
3231 for (block = gimple_block (stmt);
3232 !any_addressable_vars
3233 && block
3234 && TREE_CODE (block) == BLOCK;
3235 block = BLOCK_SUPERCONTEXT (block))
3236 {
3237 for (local_decls = BLOCK_VARS (block);
3238 local_decls;
3239 local_decls = DECL_CHAIN (local_decls))
3240 if (TREE_ADDRESSABLE (local_decls))
3241 {
3242 any_addressable_vars = 1;
3243 break;
3244 }
3245 if (block == gimple_block (parallel_stmt))
3246 break;
3247 }
3248 }
3249 if (!any_addressable_vars)
3250 gimple_omp_return_set_nowait (stmt);
3251 }
3252 }
3253 }
3254
3255 static void
3256 remove_exit_barriers (struct omp_region *region)
3257 {
3258 if (region->type == GIMPLE_OMP_PARALLEL)
3259 remove_exit_barrier (region);
3260
3261 if (region->inner)
3262 {
3263 region = region->inner;
3264 remove_exit_barriers (region);
3265 while (region->next)
3266 {
3267 region = region->next;
3268 remove_exit_barriers (region);
3269 }
3270 }
3271 }
3272
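/* For illustration (hedged, not generated verbatim): in

     #pragma omp parallel
       {
         #pragma omp for
           for (i = 0; i < n; i++) ...   <- implicit barrier at loop end
       }                                 <- implicit barrier at parallel end

   the loop's trailing barrier is immediately followed by the parallel's
   own barrier, so the workshare's GIMPLE_OMP_RETURN can be marked nowait
   here, provided no addressable locals could still be needed by queued
   tasks.  */
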
3273 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3274 calls. These can't be declared as const functions, but
3275 within one parallel body they are constant, so they can be
3276 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3277 which are declared const. Similarly for a task body, except
3278 that in an untied task omp_get_thread_num () can change at any task
3279 scheduling point. */
3280
3281 static void
3282 optimize_omp_library_calls (gimple entry_stmt)
3283 {
3284 basic_block bb;
3285 gimple_stmt_iterator gsi;
3286 tree thr_num_id
3287 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
3288 tree num_thr_id
3289 = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
3290 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3291 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3292 OMP_CLAUSE_UNTIED) != NULL);
3293
3294 FOR_EACH_BB (bb)
3295 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3296 {
3297 gimple call = gsi_stmt (gsi);
3298 tree decl;
3299
3300 if (is_gimple_call (call)
3301 && (decl = gimple_call_fndecl (call))
3302 && DECL_EXTERNAL (decl)
3303 && TREE_PUBLIC (decl)
3304 && DECL_INITIAL (decl) == NULL)
3305 {
3306 tree built_in;
3307
3308 if (DECL_NAME (decl) == thr_num_id)
3309 {
3310 /* In #pragma omp task untied omp_get_thread_num () can change
3311 during the execution of the task region. */
3312 if (untied_task)
3313 continue;
3314 built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
3315 }
3316 else if (DECL_NAME (decl) == num_thr_id)
3317 built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
3318 else
3319 continue;
3320
3321 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3322 || gimple_call_num_args (call) != 0)
3323 continue;
3324
3325 if (flag_exceptions && !TREE_NOTHROW (decl))
3326 continue;
3327
3328 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3329 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3330 TREE_TYPE (TREE_TYPE (built_in))))
3331 continue;
3332
3333 gimple_call_set_fndecl (call, built_in);
3334 }
3335 }
3336 }
3337
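/* Illustrative effect (a sketch): inside an outlined parallel body a call

     tid = omp_get_thread_num ();

   is redirected to the const builtin

     tid = __builtin_omp_get_thread_num ();

   so later passes may CSE or hoist it, which would not be valid for the
   plain external declaration.  */
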
3338 /* Expand the OpenMP parallel or task directive starting at REGION. */
3339
3340 static void
3341 expand_omp_taskreg (struct omp_region *region)
3342 {
3343 basic_block entry_bb, exit_bb, new_bb;
3344 struct function *child_cfun;
3345 tree child_fn, block, t;
3346 tree save_current;
3347 gimple_stmt_iterator gsi;
3348 gimple entry_stmt, stmt;
3349 edge e;
3350 VEC(tree,gc) *ws_args;
3351
3352 entry_stmt = last_stmt (region->entry);
3353 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3354 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3355 /* If this function has been already instrumented, make sure
3356 the child function isn't instrumented again. */
3357 child_cfun->after_tree_profile = cfun->after_tree_profile;
3358
3359 entry_bb = region->entry;
3360 exit_bb = region->exit;
3361
3362 if (is_combined_parallel (region))
3363 ws_args = region->ws_args;
3364 else
3365 ws_args = NULL;
3366
3367 if (child_cfun->cfg)
3368 {
3369 /* Due to inlining, it may happen that we have already outlined
3370 the region, in which case all we need to do is make the
3371 sub-graph unreachable and emit the parallel call. */
3372 edge entry_succ_e, exit_succ_e;
3373 gimple_stmt_iterator gsi;
3374
3375 entry_succ_e = single_succ_edge (entry_bb);
3376
3377 gsi = gsi_last_bb (entry_bb);
3378 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3379 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3380 gsi_remove (&gsi, true);
3381
3382 new_bb = entry_bb;
3383 if (exit_bb)
3384 {
3385 exit_succ_e = single_succ_edge (exit_bb);
3386 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3387 }
3388 remove_edge_and_dominated_blocks (entry_succ_e);
3389 }
3390 else
3391 {
3392 unsigned srcidx, dstidx, num;
3393
3394 /* If the parallel region needs data sent from the parent
3395 function, then the very first statement (except possible
3396 tree profile counter updates) of the parallel body
3397 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3398 &.OMP_DATA_O is passed as an argument to the child function,
3399 we need to replace it with the argument as seen by the child
3400 function.
3401
3402 In most cases, this will end up being the identity assignment
3403 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3404 a function call that has been inlined, the original PARM_DECL
3405 .OMP_DATA_I may have been converted into a different local
3406 variable, in which case we need to keep the assignment. */
3407 if (gimple_omp_taskreg_data_arg (entry_stmt))
3408 {
3409 basic_block entry_succ_bb = single_succ (entry_bb);
3410 gimple_stmt_iterator gsi;
3411 tree arg, narg;
3412 gimple parcopy_stmt = NULL;
3413
3414 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3415 {
3416 gimple stmt;
3417
3418 gcc_assert (!gsi_end_p (gsi));
3419 stmt = gsi_stmt (gsi);
3420 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3421 continue;
3422
3423 if (gimple_num_ops (stmt) == 2)
3424 {
3425 tree arg = gimple_assign_rhs1 (stmt);
3426
3427 /* We're ignoring the subcode because we're
3428 effectively doing a STRIP_NOPS. */
3429
3430 if (TREE_CODE (arg) == ADDR_EXPR
3431 && TREE_OPERAND (arg, 0)
3432 == gimple_omp_taskreg_data_arg (entry_stmt))
3433 {
3434 parcopy_stmt = stmt;
3435 break;
3436 }
3437 }
3438 }
3439
3440 gcc_assert (parcopy_stmt != NULL);
3441 arg = DECL_ARGUMENTS (child_fn);
3442
3443 if (!gimple_in_ssa_p (cfun))
3444 {
3445 if (gimple_assign_lhs (parcopy_stmt) == arg)
3446 gsi_remove (&gsi, true);
3447 else
3448 {
3449 /* ?? Is setting the subcode really necessary ?? */
3450 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3451 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3452 }
3453 }
3454 else
3455 {
3456 /* If we are in ssa form, we must load the value from the default
3457 definition of the argument. That should not be defined now,
3458 since the argument is not used uninitialized. */
3459 gcc_assert (gimple_default_def (cfun, arg) == NULL);
3460 narg = make_ssa_name (arg, gimple_build_nop ());
3461 set_default_def (arg, narg);
3462 /* ?? Is setting the subcode really necessary ?? */
3463 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3464 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3465 update_stmt (parcopy_stmt);
3466 }
3467 }
3468
3469 /* Declare local variables needed in CHILD_CFUN. */
3470 block = DECL_INITIAL (child_fn);
3471 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3472 /* The gimplifier could record temporaries in parallel/task block
3473 rather than in the containing function's local_decls chain,
3474 which would mean cgraph would miss finalizing them. Do it now. */
3475 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3476 if (TREE_CODE (t) == VAR_DECL
3477 && TREE_STATIC (t)
3478 && !DECL_EXTERNAL (t))
3479 varpool_finalize_decl (t);
3480 DECL_SAVED_TREE (child_fn) = NULL;
3481 gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3482 TREE_USED (block) = 1;
3483
3484 /* Reset DECL_CONTEXT on function arguments. */
3485 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3486 DECL_CONTEXT (t) = child_fn;
3487
3488 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3489 so that it can be moved to the child function. */
3490 gsi = gsi_last_bb (entry_bb);
3491 stmt = gsi_stmt (gsi);
3492 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3493 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3494 gsi_remove (&gsi, true);
3495 e = split_block (entry_bb, stmt);
3496 entry_bb = e->dest;
3497 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3498
3499 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3500 if (exit_bb)
3501 {
3502 gsi = gsi_last_bb (exit_bb);
3503 gcc_assert (!gsi_end_p (gsi)
3504 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3505 stmt = gimple_build_return (NULL);
3506 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3507 gsi_remove (&gsi, true);
3508 }
3509
3510 /* Move the parallel region into CHILD_CFUN. */
3511
3512 if (gimple_in_ssa_p (cfun))
3513 {
3514 push_cfun (child_cfun);
3515 init_tree_ssa (child_cfun);
3516 init_ssa_operands ();
3517 cfun->gimple_df->in_ssa_p = true;
3518 pop_cfun ();
3519 block = NULL_TREE;
3520 }
3521 else
3522 block = gimple_block (entry_stmt);
3523
3524 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3525 if (exit_bb)
3526 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3527
3528 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3529 num = VEC_length (tree, child_cfun->local_decls);
3530 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3531 {
3532 t = VEC_index (tree, child_cfun->local_decls, srcidx);
3533 if (DECL_CONTEXT (t) == cfun->decl)
3534 continue;
3535 if (srcidx != dstidx)
3536 VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3537 dstidx++;
3538 }
3539 if (dstidx != num)
3540 VEC_truncate (tree, child_cfun->local_decls, dstidx);
3541
3542 /* Inform the callgraph about the new function. */
3543 DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3544 = cfun->curr_properties;
3545 cgraph_add_new_function (child_fn, true);
3546
3547 /* Fix the callgraph edges for child_cfun. Those for cfun will be
3548 fixed in a following pass. */
3549 push_cfun (child_cfun);
3550 save_current = current_function_decl;
3551 current_function_decl = child_fn;
3552 if (optimize)
3553 optimize_omp_library_calls (entry_stmt);
3554 rebuild_cgraph_edges ();
3555
3556 /* Some EH regions might become dead, see PR34608. If
3557 pass_cleanup_cfg isn't the first pass to happen with the
3558 new child, these dead EH edges might cause problems.
3559 Clean them up now. */
3560 if (flag_exceptions)
3561 {
3562 basic_block bb;
3563 bool changed = false;
3564
3565 FOR_EACH_BB (bb)
3566 changed |= gimple_purge_dead_eh_edges (bb);
3567 if (changed)
3568 cleanup_tree_cfg ();
3569 }
3570 if (gimple_in_ssa_p (cfun))
3571 update_ssa (TODO_update_ssa);
3572 current_function_decl = save_current;
3573 pop_cfun ();
3574 }
3575
3576 /* Emit a library call to launch the children threads. */
3577 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3578 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3579 else
3580 expand_task_call (new_bb, entry_stmt);
3581 update_ssa (TODO_update_ssa_only_virtuals);
3582 }
3583
3584
3585 /* A subroutine of expand_omp_for. Generate code for a parallel
3586 loop with any schedule. Given parameters:
3587
3588 for (V = N1; V cond N2; V += STEP) BODY;
3589
3590 where COND is "<" or ">", we generate pseudocode
3591
3592 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3593 if (more) goto L0; else goto L3;
3594 L0:
3595 V = istart0;
3596 iend = iend0;
3597 L1:
3598 BODY;
3599 V += STEP;
3600 if (V cond iend) goto L1; else goto L2;
3601 L2:
3602 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3603 L3:
3604
3605 If this is a combined omp parallel loop, instead of the call to
3606 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3607
3608 For collapsed loops, given parameters:
3609 collapse(3)
3610 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3611 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3612 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3613 BODY;
3614
3615 we generate pseudocode
3616
3617 if (cond3 is <)
3618 adj = STEP3 - 1;
3619 else
3620 adj = STEP3 + 1;
3621 count3 = (adj + N32 - N31) / STEP3;
3622 if (cond2 is <)
3623 adj = STEP2 - 1;
3624 else
3625 adj = STEP2 + 1;
3626 count2 = (adj + N22 - N21) / STEP2;
3627 if (cond1 is <)
3628 adj = STEP1 - 1;
3629 else
3630 adj = STEP1 + 1;
3631 count1 = (adj + N12 - N11) / STEP1;
3632 count = count1 * count2 * count3;
3633 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3634 if (more) goto L0; else goto L3;
3635 L0:
3636 V = istart0;
3637 T = V;
3638 V3 = N31 + (T % count3) * STEP3;
3639 T = T / count3;
3640 V2 = N21 + (T % count2) * STEP2;
3641 T = T / count2;
3642 V1 = N11 + T * STEP1;
3643 iend = iend0;
3644 L1:
3645 BODY;
3646 V += 1;
3647 if (V < iend) goto L10; else goto L2;
3648 L10:
3649 V3 += STEP3;
3650 if (V3 cond3 N32) goto L1; else goto L11;
3651 L11:
3652 V3 = N31;
3653 V2 += STEP2;
3654 if (V2 cond2 N22) goto L1; else goto L12;
3655 L12:
3656 V2 = N21;
3657 V1 += STEP1;
3658 goto L1;
3659 L2:
3660 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3661 L3:
3662
3663 */
3664
3665 static void
3666 expand_omp_for_generic (struct omp_region *region,
3667 struct omp_for_data *fd,
3668 enum built_in_function start_fn,
3669 enum built_in_function next_fn)
3670 {
3671 tree type, istart0, iend0, iend;
3672 tree t, vmain, vback, bias = NULL_TREE;
3673 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3674 basic_block l2_bb = NULL, l3_bb = NULL;
3675 gimple_stmt_iterator gsi;
3676 gimple stmt;
3677 bool in_combined_parallel = is_combined_parallel (region);
3678 bool broken_loop = region->cont == NULL;
3679 edge e, ne;
3680 tree *counts = NULL;
3681 int i;
3682
3683 gcc_assert (!broken_loop || !in_combined_parallel);
3684 gcc_assert (fd->iter_type == long_integer_type_node
3685 || !in_combined_parallel);
3686
3687 type = TREE_TYPE (fd->loop.v);
3688 istart0 = create_tmp_var (fd->iter_type, ".istart0");
3689 iend0 = create_tmp_var (fd->iter_type, ".iend0");
3690 TREE_ADDRESSABLE (istart0) = 1;
3691 TREE_ADDRESSABLE (iend0) = 1;
3692 if (gimple_in_ssa_p (cfun))
3693 {
3694 add_referenced_var (istart0);
3695 add_referenced_var (iend0);
3696 }
3697
3698 /* See if we need to bias by LLONG_MIN. */
3699 if (fd->iter_type == long_long_unsigned_type_node
3700 && TREE_CODE (type) == INTEGER_TYPE
3701 && !TYPE_UNSIGNED (type))
3702 {
3703 tree n1, n2;
3704
3705 if (fd->loop.cond_code == LT_EXPR)
3706 {
3707 n1 = fd->loop.n1;
3708 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3709 }
3710 else
3711 {
3712 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3713 n2 = fd->loop.n1;
3714 }
3715 if (TREE_CODE (n1) != INTEGER_CST
3716 || TREE_CODE (n2) != INTEGER_CST
3717 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3718 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3719 }
3720
3721 entry_bb = region->entry;
3722 cont_bb = region->cont;
3723 collapse_bb = NULL;
3724 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3725 gcc_assert (broken_loop
3726 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3727 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3728 l1_bb = single_succ (l0_bb);
3729 if (!broken_loop)
3730 {
3731 l2_bb = create_empty_bb (cont_bb);
3732 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3733 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3734 }
3735 else
3736 l2_bb = NULL;
3737 l3_bb = BRANCH_EDGE (entry_bb)->dest;
3738 exit_bb = region->exit;
3739
3740 gsi = gsi_last_bb (entry_bb);
3741
3742 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3743 if (fd->collapse > 1)
3744 {
3745 /* collapsed loops need work for expansion in SSA form. */
3746 gcc_assert (!gimple_in_ssa_p (cfun));
3747 counts = (tree *) alloca (fd->collapse * sizeof (tree));
3748 for (i = 0; i < fd->collapse; i++)
3749 {
3750 tree itype = TREE_TYPE (fd->loops[i].v);
3751
3752 if (POINTER_TYPE_P (itype))
3753 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3754 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3755 ? -1 : 1));
3756 t = fold_build2 (PLUS_EXPR, itype,
3757 fold_convert (itype, fd->loops[i].step), t);
3758 t = fold_build2 (PLUS_EXPR, itype, t,
3759 fold_convert (itype, fd->loops[i].n2));
3760 t = fold_build2 (MINUS_EXPR, itype, t,
3761 fold_convert (itype, fd->loops[i].n1));
3762 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3763 t = fold_build2 (TRUNC_DIV_EXPR, itype,
3764 fold_build1 (NEGATE_EXPR, itype, t),
3765 fold_build1 (NEGATE_EXPR, itype,
3766 fold_convert (itype,
3767 fd->loops[i].step)));
3768 else
3769 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3770 fold_convert (itype, fd->loops[i].step));
3771 t = fold_convert (type, t);
3772 if (TREE_CODE (t) == INTEGER_CST)
3773 counts[i] = t;
3774 else
3775 {
3776 counts[i] = create_tmp_var (type, ".count");
3777 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3778 true, GSI_SAME_STMT);
3779 stmt = gimple_build_assign (counts[i], t);
3780 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3781 }
3782 if (SSA_VAR_P (fd->loop.n2))
3783 {
3784 if (i == 0)
3785 t = counts[0];
3786 else
3787 {
3788 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3789 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3790 true, GSI_SAME_STMT);
3791 }
3792 stmt = gimple_build_assign (fd->loop.n2, t);
3793 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3794 }
3795 }
3796 }
3797 if (in_combined_parallel)
3798 {
3799 /* In a combined parallel loop, emit a call to
3800 GOMP_loop_foo_next. */
3801 t = build_call_expr (built_in_decls[next_fn], 2,
3802 build_fold_addr_expr (istart0),
3803 build_fold_addr_expr (iend0));
3804 }
3805 else
3806 {
3807 tree t0, t1, t2, t3, t4;
3808 /* If this is not a combined parallel loop, emit a call to
3809 GOMP_loop_foo_start in ENTRY_BB. */
3810 t4 = build_fold_addr_expr (iend0);
3811 t3 = build_fold_addr_expr (istart0);
3812 t2 = fold_convert (fd->iter_type, fd->loop.step);
3813 if (POINTER_TYPE_P (type)
3814 && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3815 {
3816 /* Avoid casting pointers to integer of a different size. */
3817 tree itype
3818 = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3819 t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3820 t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3821 }
3822 else
3823 {
3824 t1 = fold_convert (fd->iter_type, fd->loop.n2);
3825 t0 = fold_convert (fd->iter_type, fd->loop.n1);
3826 }
3827 if (bias)
3828 {
3829 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3830 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3831 }
3832 if (fd->iter_type == long_integer_type_node)
3833 {
3834 if (fd->chunk_size)
3835 {
3836 t = fold_convert (fd->iter_type, fd->chunk_size);
3837 t = build_call_expr (built_in_decls[start_fn], 6,
3838 t0, t1, t2, t, t3, t4);
3839 }
3840 else
3841 t = build_call_expr (built_in_decls[start_fn], 5,
3842 t0, t1, t2, t3, t4);
3843 }
3844 else
3845 {
3846 tree t5;
3847 tree c_bool_type;
3848
3849 /* The GOMP_loop_ull_*start functions have an additional boolean
3850 argument, true for < loops and false for > loops.
3851 In Fortran, the C bool type can be different from
3852 boolean_type_node. */
3853 c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
3854 t5 = build_int_cst (c_bool_type,
3855 fd->loop.cond_code == LT_EXPR ? 1 : 0);
3856 if (fd->chunk_size)
3857 {
3858 t = fold_convert (fd->iter_type, fd->chunk_size);
3859 t = build_call_expr (built_in_decls[start_fn], 7,
3860 t5, t0, t1, t2, t, t3, t4);
3861 }
3862 else
3863 t = build_call_expr (built_in_decls[start_fn], 6,
3864 t5, t0, t1, t2, t3, t4);
3865 }
3866 }
3867 if (TREE_TYPE (t) != boolean_type_node)
3868 t = fold_build2 (NE_EXPR, boolean_type_node,
3869 t, build_int_cst (TREE_TYPE (t), 0));
3870 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3871 true, GSI_SAME_STMT);
3872 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3873
3874 /* Remove the GIMPLE_OMP_FOR statement. */
3875 gsi_remove (&gsi, true);
3876
3877 /* Iteration setup for sequential loop goes in L0_BB. */
3878 gsi = gsi_start_bb (l0_bb);
3879 t = istart0;
3880 if (bias)
3881 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3882 if (POINTER_TYPE_P (type))
3883 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3884 0), t);
3885 t = fold_convert (type, t);
3886 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3887 false, GSI_CONTINUE_LINKING);
3888 stmt = gimple_build_assign (fd->loop.v, t);
3889 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3890
3891 t = iend0;
3892 if (bias)
3893 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3894 if (POINTER_TYPE_P (type))
3895 t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3896 0), t);
3897 t = fold_convert (type, t);
3898 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3899 false, GSI_CONTINUE_LINKING);
3900 if (fd->collapse > 1)
3901 {
3902 tree tem = create_tmp_var (type, ".tem");
3903
3904 stmt = gimple_build_assign (tem, fd->loop.v);
3905 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3906 for (i = fd->collapse - 1; i >= 0; i--)
3907 {
3908 tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3909 itype = vtype;
3910 if (POINTER_TYPE_P (vtype))
3911 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3912 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3913 t = fold_convert (itype, t);
3914 t = fold_build2 (MULT_EXPR, itype, t,
3915 fold_convert (itype, fd->loops[i].step));
3916 if (POINTER_TYPE_P (vtype))
3917 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3918 fd->loops[i].n1, fold_convert (sizetype, t));
3919 else
3920 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3921 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3922 false, GSI_CONTINUE_LINKING);
3923 stmt = gimple_build_assign (fd->loops[i].v, t);
3924 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3925 if (i != 0)
3926 {
3927 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3928 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3929 false, GSI_CONTINUE_LINKING);
3930 stmt = gimple_build_assign (tem, t);
3931 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3932 }
3933 }
3934 }
3935
3936 if (!broken_loop)
3937 {
3938 /* Code to control the increment and predicate for the sequential
3939 loop goes in the CONT_BB. */
3940 gsi = gsi_last_bb (cont_bb);
3941 stmt = gsi_stmt (gsi);
3942 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3943 vmain = gimple_omp_continue_control_use (stmt);
3944 vback = gimple_omp_continue_control_def (stmt);
3945
3946 if (POINTER_TYPE_P (type))
3947 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
3948 fold_convert (sizetype, fd->loop.step));
3949 else
3950 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3951 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3952 true, GSI_SAME_STMT);
3953 stmt = gimple_build_assign (vback, t);
3954 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3955
3956 t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3957 stmt = gimple_build_cond_empty (t);
3958 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3959
3960 /* Remove GIMPLE_OMP_CONTINUE. */
3961 gsi_remove (&gsi, true);
3962
3963 if (fd->collapse > 1)
3964 {
3965 basic_block last_bb, bb;
3966
3967 last_bb = cont_bb;
3968 for (i = fd->collapse - 1; i >= 0; i--)
3969 {
3970 tree vtype = TREE_TYPE (fd->loops[i].v);
3971
3972 bb = create_empty_bb (last_bb);
3973 gsi = gsi_start_bb (bb);
3974
3975 if (i < fd->collapse - 1)
3976 {
3977 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
3978 e->probability = REG_BR_PROB_BASE / 8;
3979
3980 t = fd->loops[i + 1].n1;
3981 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3982 false, GSI_CONTINUE_LINKING);
3983 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
3984 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3985 }
3986 else
3987 collapse_bb = bb;
3988
3989 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
3990
3991 if (POINTER_TYPE_P (vtype))
3992 t = fold_build2 (POINTER_PLUS_EXPR, vtype,
3993 fd->loops[i].v,
3994 fold_convert (sizetype, fd->loops[i].step));
3995 else
3996 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
3997 fd->loops[i].step);
3998 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3999 false, GSI_CONTINUE_LINKING);
4000 stmt = gimple_build_assign (fd->loops[i].v, t);
4001 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4002
4003 if (i > 0)
4004 {
4005 t = fd->loops[i].n2;
4006 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4007 false, GSI_CONTINUE_LINKING);
4008 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4009 fd->loops[i].v, t);
4010 stmt = gimple_build_cond_empty (t);
4011 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4012 e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4013 e->probability = REG_BR_PROB_BASE * 7 / 8;
4014 }
4015 else
4016 make_edge (bb, l1_bb, EDGE_FALLTHRU);
4017 last_bb = bb;
4018 }
4019 }
4020
4021 /* Emit code to get the next parallel iteration in L2_BB. */
4022 gsi = gsi_start_bb (l2_bb);
4023
4024 t = build_call_expr (built_in_decls[next_fn], 2,
4025 build_fold_addr_expr (istart0),
4026 build_fold_addr_expr (iend0));
4027 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4028 false, GSI_CONTINUE_LINKING);
4029 if (TREE_TYPE (t) != boolean_type_node)
4030 t = fold_build2 (NE_EXPR, boolean_type_node,
4031 t, build_int_cst (TREE_TYPE (t), 0));
4032 stmt = gimple_build_cond_empty (t);
4033 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4034 }
4035
4036 /* Add the loop cleanup function. */
4037 gsi = gsi_last_bb (exit_bb);
4038 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4039 t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
4040 else
4041 t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
4042 stmt = gimple_build_call (t, 0);
4043 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4044 gsi_remove (&gsi, true);
4045
4046 /* Connect the new blocks. */
4047 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4048 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4049
4050 if (!broken_loop)
4051 {
4052 gimple_seq phis;
4053
4054 e = find_edge (cont_bb, l3_bb);
4055 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4056
4057 phis = phi_nodes (l3_bb);
4058 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4059 {
4060 gimple phi = gsi_stmt (gsi);
4061 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4062 PHI_ARG_DEF_FROM_EDGE (phi, e));
4063 }
4064 remove_edge (e);
4065
4066 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4067 if (fd->collapse > 1)
4068 {
4069 e = find_edge (cont_bb, l1_bb);
4070 remove_edge (e);
4071 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4072 }
4073 else
4074 {
4075 e = find_edge (cont_bb, l1_bb);
4076 e->flags = EDGE_TRUE_VALUE;
4077 }
4078 e->probability = REG_BR_PROB_BASE * 7 / 8;
4079 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4080 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4081
4082 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4083 recompute_dominator (CDI_DOMINATORS, l2_bb));
4084 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4085 recompute_dominator (CDI_DOMINATORS, l3_bb));
4086 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4087 recompute_dominator (CDI_DOMINATORS, l0_bb));
4088 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4089 recompute_dominator (CDI_DOMINATORS, l1_bb));
4090 }
4091 }
4092
4093
4094 /* A subroutine of expand_omp_for. Generate code for a parallel
4095 loop with static schedule and no specified chunk size. Given
4096 parameters:
4097
4098 for (V = N1; V cond N2; V += STEP) BODY;
4099
4100 where COND is "<" or ">", we generate pseudocode
4101
4102 if (cond is <)
4103 adj = STEP - 1;
4104 else
4105 adj = STEP + 1;
4106 if ((__typeof (V)) -1 > 0 && cond is >)
4107 n = -(adj + N2 - N1) / -STEP;
4108 else
4109 n = (adj + N2 - N1) / STEP;
4110 q = n / nthreads;
4111 tt = n % nthreads;
4112 if (threadid < tt) goto L3; else goto L4;
4113 L3:
4114 tt = 0;
4115 q = q + 1;
4116 L4:
4117 s0 = q * threadid + tt;
4118 e0 = s0 + q;
4119 V = s0 * STEP + N1;
4120 if (s0 >= e0) goto L2; else goto L0;
4121 L0:
4122 e = e0 * STEP + N1;
4123 L1:
4124 BODY;
4125 V += STEP;
4126 if (V cond e) goto L1;
4127 L2:
4128 */
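
/* A worked example (illustrative only): for n = 10 iterations and
   nthreads = 4 the code below computes q = 2 and tt = 2, so threads 0
   and 1 each run q + 1 = 3 iterations ([0,3) and [3,6)) while threads 2
   and 3 each run 2 ([6,8) and [8,10)); the n % nthreads leftover
   iterations are therefore spread over the first tt threads instead of
   all being given to one thread.  */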
4129
4130 static void
4131 expand_omp_for_static_nochunk (struct omp_region *region,
4132 struct omp_for_data *fd)
4133 {
4134 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
4135 tree type, itype, vmain, vback;
4136 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4137 basic_block body_bb, cont_bb;
4138 basic_block fin_bb;
4139 gimple_stmt_iterator gsi;
4140 gimple stmt;
4141 edge ep;
4142
4143 itype = type = TREE_TYPE (fd->loop.v);
4144 if (POINTER_TYPE_P (type))
4145 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4146
4147 entry_bb = region->entry;
4148 cont_bb = region->cont;
4149 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4150 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4151 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4152 body_bb = single_succ (seq_start_bb);
4153 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4154 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4155 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4156 exit_bb = region->exit;
4157
4158 /* Iteration space partitioning goes in ENTRY_BB. */
4159 gsi = gsi_last_bb (entry_bb);
4160 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4161
4162 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4163 t = fold_convert (itype, t);
4164 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4165 true, GSI_SAME_STMT);
4166
4167 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4168 t = fold_convert (itype, t);
4169 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4170 true, GSI_SAME_STMT);
4171
4172 fd->loop.n1
4173 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4174 true, NULL_TREE, true, GSI_SAME_STMT);
4175 fd->loop.n2
4176 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4177 true, NULL_TREE, true, GSI_SAME_STMT);
4178 fd->loop.step
4179 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4180 true, NULL_TREE, true, GSI_SAME_STMT);
4181
4182 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4183 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4184 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4185 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4186 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4187 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4188 fold_build1 (NEGATE_EXPR, itype, t),
4189 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4190 else
4191 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4192 t = fold_convert (itype, t);
4193 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4194
4195 q = create_tmp_var (itype, "q");
4196 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4197 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4198 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4199
4200 tt = create_tmp_var (itype, "tt");
4201 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4202 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4203 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
4204
4205 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4206 stmt = gimple_build_cond_empty (t);
4207 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4208
4209 second_bb = split_block (entry_bb, stmt)->dest;
4210 gsi = gsi_last_bb (second_bb);
4211 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4212
4213 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4214 GSI_SAME_STMT);
4215 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4216 build_int_cst (itype, 1));
4217 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4218
4219 third_bb = split_block (second_bb, stmt)->dest;
4220 gsi = gsi_last_bb (third_bb);
4221 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4222
4223 t = build2 (MULT_EXPR, itype, q, threadid);
4224 t = build2 (PLUS_EXPR, itype, t, tt);
4225 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4226
4227 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4228 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4229
4230 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4231 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4232
4233 /* Remove the GIMPLE_OMP_FOR statement. */
4234 gsi_remove (&gsi, true);
4235
4236 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4237 gsi = gsi_start_bb (seq_start_bb);
4238
4239 t = fold_convert (itype, s0);
4240 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4241 if (POINTER_TYPE_P (type))
4242 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4243 fold_convert (sizetype, t));
4244 else
4245 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4246 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4247 false, GSI_CONTINUE_LINKING);
4248 stmt = gimple_build_assign (fd->loop.v, t);
4249 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4250
4251 t = fold_convert (itype, e0);
4252 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4253 if (POINTER_TYPE_P (type))
4254 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4255 fold_convert (sizetype, t));
4256 else
4257 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4258 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4259 false, GSI_CONTINUE_LINKING);
4260
4261 /* The code controlling the sequential loop replaces the
4262 GIMPLE_OMP_CONTINUE. */
4263 gsi = gsi_last_bb (cont_bb);
4264 stmt = gsi_stmt (gsi);
4265 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4266 vmain = gimple_omp_continue_control_use (stmt);
4267 vback = gimple_omp_continue_control_def (stmt);
4268
4269 if (POINTER_TYPE_P (type))
4270 t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
4271 fold_convert (sizetype, fd->loop.step));
4272 else
4273 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4274 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4275 true, GSI_SAME_STMT);
4276 stmt = gimple_build_assign (vback, t);
4277 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4278
4279 t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4280 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4281
4282 /* Remove the GIMPLE_OMP_CONTINUE statement. */
4283 gsi_remove (&gsi, true);
4284
4285 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4286 gsi = gsi_last_bb (exit_bb);
4287 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4288 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4289 false, GSI_SAME_STMT);
4290 gsi_remove (&gsi, true);
4291
4292 /* Connect all the blocks. */
4293 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
4294 ep->probability = REG_BR_PROB_BASE / 4 * 3;
4295 ep = find_edge (entry_bb, second_bb);
4296 ep->flags = EDGE_TRUE_VALUE;
4297 ep->probability = REG_BR_PROB_BASE / 4;
4298 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4299 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4300
4301 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4302 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4303
4304 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
4305 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
4306 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
4307 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4308 recompute_dominator (CDI_DOMINATORS, body_bb));
4309 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4310 recompute_dominator (CDI_DOMINATORS, fin_bb));
4311 }
4312
4313
4314 /* A subroutine of expand_omp_for. Generate code for a parallel
4315 loop with static schedule and a specified chunk size. Given
4316 parameters:
4317
4318 for (V = N1; V cond N2; V += STEP) BODY;
4319
4320 where COND is "<" or ">", we generate pseudocode
4321
4322 if (cond is <)
4323 adj = STEP - 1;
4324 else
4325 adj = STEP + 1;
4326 if ((__typeof (V)) -1 > 0 && cond is >)
4327 n = -(adj + N2 - N1) / -STEP;
4328 else
4329 n = (adj + N2 - N1) / STEP;
4330 trip = 0;
4331 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
4332 here so that V is defined
4333 if the loop is not entered
4334 L0:
4335 s0 = (trip * nthreads + threadid) * CHUNK;
4336 e0 = min(s0 + CHUNK, n);
4337 if (s0 < n) goto L1; else goto L4;
4338 L1:
4339 V = s0 * STEP + N1;
4340 e = e0 * STEP + N1;
4341 L2:
4342 BODY;
4343 V += STEP;
4344 if (V cond e) goto L2; else goto L3;
4345 L3:
4346 trip += 1;
4347 goto L0;
4348 L4:
4349 */
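
/* A worked example (illustrative only): with n = 10, CHUNK = 2 and
   nthreads = 3, successive trips give thread 0 the chunks [0,2) and
   [6,8), thread 1 the chunks [2,4) and [8,10), and thread 2 the single
   chunk [4,6); on its next trip thread 2 computes s0 = 10, the
   "s0 < n" test fails, and it falls through to L4.  */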
4350
4351 static void
4352 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4353 {
4354 tree n, s0, e0, e, t;
4355 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4356 tree type, itype, v_main, v_back, v_extra;
4357 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4358 basic_block trip_update_bb, cont_bb, fin_bb;
4359 gimple_stmt_iterator si;
4360 gimple stmt;
4361 edge se;
4362
4363 itype = type = TREE_TYPE (fd->loop.v);
4364 if (POINTER_TYPE_P (type))
4365 itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4366
4367 entry_bb = region->entry;
4368 se = split_block (entry_bb, last_stmt (entry_bb));
4369 entry_bb = se->src;
4370 iter_part_bb = se->dest;
4371 cont_bb = region->cont;
4372 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4373 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4374 == FALLTHRU_EDGE (cont_bb)->dest);
4375 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4376 body_bb = single_succ (seq_start_bb);
4377 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4378 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4379 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4380 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4381 exit_bb = region->exit;
4382
4383 /* Trip and adjustment setup goes in ENTRY_BB. */
4384 si = gsi_last_bb (entry_bb);
4385 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4386
4387 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
4388 t = fold_convert (itype, t);
4389 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4390 true, GSI_SAME_STMT);
4391
4392 t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
4393 t = fold_convert (itype, t);
4394 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4395 true, GSI_SAME_STMT);
4396
4397 fd->loop.n1
4398 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4399 true, NULL_TREE, true, GSI_SAME_STMT);
4400 fd->loop.n2
4401 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4402 true, NULL_TREE, true, GSI_SAME_STMT);
4403 fd->loop.step
4404 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4405 true, NULL_TREE, true, GSI_SAME_STMT);
4406 fd->chunk_size
4407 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4408 true, NULL_TREE, true, GSI_SAME_STMT);
4409
4410 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4411 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4412 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4413 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4414 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4415 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4416 fold_build1 (NEGATE_EXPR, itype, t),
4417 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4418 else
4419 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4420 t = fold_convert (itype, t);
4421 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4422 true, GSI_SAME_STMT);
4423
4424 trip_var = create_tmp_var (itype, ".trip");
4425 if (gimple_in_ssa_p (cfun))
4426 {
4427 add_referenced_var (trip_var);
4428 trip_init = make_ssa_name (trip_var, NULL);
4429 trip_main = make_ssa_name (trip_var, NULL);
4430 trip_back = make_ssa_name (trip_var, NULL);
4431 }
4432 else
4433 {
4434 trip_init = trip_var;
4435 trip_main = trip_var;
4436 trip_back = trip_var;
4437 }
4438
4439 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4440 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4441
4442 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4443 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4444 if (POINTER_TYPE_P (type))
4445 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4446 fold_convert (sizetype, t));
4447 else
4448 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4449 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4450 true, GSI_SAME_STMT);
4451
4452 /* Remove the GIMPLE_OMP_FOR. */
4453 gsi_remove (&si, true);
4454
4455 /* Iteration space partitioning goes in ITER_PART_BB. */
4456 si = gsi_last_bb (iter_part_bb);
4457
4458 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4459 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4460 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4461 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4462 false, GSI_CONTINUE_LINKING);
4463
4464 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4465 t = fold_build2 (MIN_EXPR, itype, t, n);
4466 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4467 false, GSI_CONTINUE_LINKING);
4468
4469 t = build2 (LT_EXPR, boolean_type_node, s0, n);
4470 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4471
4472 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4473 si = gsi_start_bb (seq_start_bb);
4474
4475 t = fold_convert (itype, s0);
4476 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4477 if (POINTER_TYPE_P (type))
4478 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4479 fold_convert (sizetype, t));
4480 else
4481 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4482 t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4483 false, GSI_CONTINUE_LINKING);
4484 stmt = gimple_build_assign (fd->loop.v, t);
4485 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4486
4487 t = fold_convert (itype, e0);
4488 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4489 if (POINTER_TYPE_P (type))
4490 t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
4491 fold_convert (sizetype, t));
4492 else
4493 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4494 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4495 false, GSI_CONTINUE_LINKING);
4496
4497 /* The code controlling the sequential loop goes in CONT_BB,
4498 replacing the GIMPLE_OMP_CONTINUE. */
4499 si = gsi_last_bb (cont_bb);
4500 stmt = gsi_stmt (si);
4501 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4502 v_main = gimple_omp_continue_control_use (stmt);
4503 v_back = gimple_omp_continue_control_def (stmt);
4504
4505 if (POINTER_TYPE_P (type))
4506 t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
4507 fold_convert (sizetype, fd->loop.step));
4508 else
4509 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4510 stmt = gimple_build_assign (v_back, t);
4511 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4512
4513 t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4514 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4515
4516 /* Remove GIMPLE_OMP_CONTINUE. */
4517 gsi_remove (&si, true);
4518
4519 /* Trip update code goes into TRIP_UPDATE_BB. */
4520 si = gsi_start_bb (trip_update_bb);
4521
4522 t = build_int_cst (itype, 1);
4523 t = build2 (PLUS_EXPR, itype, trip_main, t);
4524 stmt = gimple_build_assign (trip_back, t);
4525 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4526
4527 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
4528 si = gsi_last_bb (exit_bb);
4529 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4530 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4531 false, GSI_SAME_STMT);
4532 gsi_remove (&si, true);
4533
4534 /* Connect the new blocks. */
4535 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4536 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4537
4538 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4539 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4540
4541 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4542
4543 if (gimple_in_ssa_p (cfun))
4544 {
4545 gimple_stmt_iterator psi;
4546 gimple phi;
4547 edge re, ene;
4548 edge_var_map_vector head;
4549 edge_var_map *vm;
4550 size_t i;
4551
4552 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4553 remove arguments of the phi nodes in fin_bb. We need to create
4554 appropriate phi nodes in iter_part_bb instead. */
4555 se = single_pred_edge (fin_bb);
4556 re = single_succ_edge (trip_update_bb);
4557 head = redirect_edge_var_map_vector (re);
4558 ene = single_succ_edge (entry_bb);
4559
4560 psi = gsi_start_phis (fin_bb);
4561 for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4562 gsi_next (&psi), ++i)
4563 {
4564 gimple nphi;
4565 source_location locus;
4566
4567 phi = gsi_stmt (psi);
4568 t = gimple_phi_result (phi);
4569 gcc_assert (t == redirect_edge_var_map_result (vm));
4570 nphi = create_phi_node (t, iter_part_bb);
4571 SSA_NAME_DEF_STMT (t) = nphi;
4572
4573 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4574 locus = gimple_phi_arg_location_from_edge (phi, se);
4575
4576 /* A special case -- fd->loop.v is not yet computed in
4577 iter_part_bb; we need to use v_extra instead. */
4578 if (t == fd->loop.v)
4579 t = v_extra;
4580 add_phi_arg (nphi, t, ene, locus);
4581 locus = redirect_edge_var_map_location (vm);
4582 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4583 }
4584 gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4585 redirect_edge_var_map_clear (re);
4586 while (1)
4587 {
4588 psi = gsi_start_phis (fin_bb);
4589 if (gsi_end_p (psi))
4590 break;
4591 remove_phi_node (&psi, false);
4592 }
4593
4594 /* Make phi node for trip. */
4595 phi = create_phi_node (trip_main, iter_part_bb);
4596 SSA_NAME_DEF_STMT (trip_main) = phi;
4597 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4598 UNKNOWN_LOCATION);
4599 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4600 UNKNOWN_LOCATION);
4601 }
4602
4603 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4604 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4605 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4606 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4607 recompute_dominator (CDI_DOMINATORS, fin_bb));
4608 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4609 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4610 set_immediate_dominator (CDI_DOMINATORS, body_bb,
4611 recompute_dominator (CDI_DOMINATORS, body_bb));
4612 }
4613
4614
4615 /* Expand the OpenMP loop defined by REGION. */
4616
4617 static void
4618 expand_omp_for (struct omp_region *region)
4619 {
4620 struct omp_for_data fd;
4621 struct omp_for_data_loop *loops;
4622
4623 loops
4624 = (struct omp_for_data_loop *)
4625 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4626 * sizeof (struct omp_for_data_loop));
4627 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4628 region->sched_kind = fd.sched_kind;
4629
4630 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4631 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4632 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4633 if (region->cont)
4634 {
4635 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4636 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4637 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4638 }
4639
4640 if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4641 && !fd.have_ordered
4642 && fd.collapse == 1
4643 && region->cont != NULL)
4644 {
4645 if (fd.chunk_size == NULL)
4646 expand_omp_for_static_nochunk (region, &fd);
4647 else
4648 expand_omp_for_static_chunk (region, &fd);
4649 }
4650 else
4651 {
4652 int fn_index, start_ix, next_ix;
4653
4654 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
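      /* The arithmetic below assumes that the GOMP_loop_*_start/next
         builtins for the static, dynamic, guided and runtime schedules
         are declared consecutively, with the ordered variants following
         as a second group of four and the _ull variants laid out the
         same way; fn_index is simply an offset into that layout.  */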
4655 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4656 ? 3 : fd.sched_kind;
4657 fn_index += fd.have_ordered * 4;
4658 start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
4659 next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
4660 if (fd.iter_type == long_long_unsigned_type_node)
4661 {
4662 start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4663 - BUILT_IN_GOMP_LOOP_STATIC_START;
4664 next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4665 - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
4666 }
4667 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4668 (enum built_in_function) next_ix);
4669 }
4670
4671 update_ssa (TODO_update_ssa_only_virtuals);
4672 }
4673
4674
4675 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
4676
4677 v = GOMP_sections_start (n);
4678 L0:
4679 switch (v)
4680 {
4681 case 0:
4682 goto L2;
4683 case 1:
4684 section 1;
4685 goto L1;
4686 case 2:
4687 ...
4688 case n:
4689 ...
4690 default:
4691 abort ();
4692 }
4693 L1:
4694 v = GOMP_sections_next ();
4695 goto L0;
4696 L2:
4697 reduction;
4698
4699 If this is a combined parallel sections, replace the call to
4700 GOMP_sections_start with a call to GOMP_sections_next. */
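
/* For illustration only, a construct such as

     #pragma omp sections
     {
       #pragma omp section
       foo ();
       #pragma omp section
       bar ();
     }

   turns into a loop around the switch above: GOMP_sections_start (2)
   (or GOMP_sections_next in the combined parallel case) returns the
   number of the section this thread should run next -- 1 for foo (),
   2 for bar () -- and 0 once no work remains, which branches to L2.
   foo and bar are placeholder names.  */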
4701
4702 static void
4703 expand_omp_sections (struct omp_region *region)
4704 {
4705 tree t, u, vin = NULL, vmain, vnext, l2;
4706 VEC (tree,heap) *label_vec;
4707 unsigned len;
4708 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4709 gimple_stmt_iterator si, switch_si;
4710 gimple sections_stmt, stmt, cont;
4711 edge_iterator ei;
4712 edge e;
4713 struct omp_region *inner;
4714 unsigned i, casei;
4715 bool exit_reachable = region->cont != NULL;
4716
4717 gcc_assert (exit_reachable == (region->exit != NULL));
4718 entry_bb = region->entry;
4719 l0_bb = single_succ (entry_bb);
4720 l1_bb = region->cont;
4721 l2_bb = region->exit;
4722 if (exit_reachable)
4723 {
4724 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4725 l2 = gimple_block_label (l2_bb);
4726 else
4727 {
4728 /* This can happen if there are reductions. */
4729 len = EDGE_COUNT (l0_bb->succs);
4730 gcc_assert (len > 0);
4731 e = EDGE_SUCC (l0_bb, len - 1);
4732 si = gsi_last_bb (e->dest);
4733 l2 = NULL_TREE;
4734 if (gsi_end_p (si)
4735 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4736 l2 = gimple_block_label (e->dest);
4737 else
4738 FOR_EACH_EDGE (e, ei, l0_bb->succs)
4739 {
4740 si = gsi_last_bb (e->dest);
4741 if (gsi_end_p (si)
4742 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4743 {
4744 l2 = gimple_block_label (e->dest);
4745 break;
4746 }
4747 }
4748 }
4749 default_bb = create_empty_bb (l1_bb->prev_bb);
4750 }
4751 else
4752 {
4753 default_bb = create_empty_bb (l0_bb);
4754 l2 = gimple_block_label (default_bb);
4755 }
4756
4757 /* We will build a switch() with enough cases for all the
4758 GIMPLE_OMP_SECTION regions, a '0' case to signal that there is no more work,
4759 and a default case to abort if something goes wrong. */
4760 len = EDGE_COUNT (l0_bb->succs);
4761
4762 /* Use VEC_quick_push on label_vec throughout, since we know the size
4763 in advance. */
4764 label_vec = VEC_alloc (tree, heap, len);
4765
4766 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4767 GIMPLE_OMP_SECTIONS statement. */
4768 si = gsi_last_bb (entry_bb);
4769 sections_stmt = gsi_stmt (si);
4770 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4771 vin = gimple_omp_sections_control (sections_stmt);
4772 if (!is_combined_parallel (region))
4773 {
4774 /* If we are not inside a combined parallel+sections region,
4775 call GOMP_sections_start. */
4776 t = build_int_cst (unsigned_type_node,
4777 exit_reachable ? len - 1 : len);
4778 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
4779 stmt = gimple_build_call (u, 1, t);
4780 }
4781 else
4782 {
4783 /* Otherwise, call GOMP_sections_next. */
4784 u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
4785 stmt = gimple_build_call (u, 0);
4786 }
4787 gimple_call_set_lhs (stmt, vin);
4788 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4789 gsi_remove (&si, true);
4790
4791 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4792 L0_BB. */
4793 switch_si = gsi_last_bb (l0_bb);
4794 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4795 if (exit_reachable)
4796 {
4797 cont = last_stmt (l1_bb);
4798 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4799 vmain = gimple_omp_continue_control_use (cont);
4800 vnext = gimple_omp_continue_control_def (cont);
4801 }
4802 else
4803 {
4804 vmain = vin;
4805 vnext = NULL_TREE;
4806 }
4807
4808 i = 0;
4809 if (exit_reachable)
4810 {
4811 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
4812 VEC_quick_push (tree, label_vec, t);
4813 i++;
4814 }
4815
4816 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
4817 for (inner = region->inner, casei = 1;
4818 inner;
4819 inner = inner->next, i++, casei++)
4820 {
4821 basic_block s_entry_bb, s_exit_bb;
4822
4823 /* Skip optional reduction region. */
4824 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4825 {
4826 --i;
4827 --casei;
4828 continue;
4829 }
4830
4831 s_entry_bb = inner->entry;
4832 s_exit_bb = inner->exit;
4833
4834 t = gimple_block_label (s_entry_bb);
4835 u = build_int_cst (unsigned_type_node, casei);
4836 u = build_case_label (u, NULL, t);
4837 VEC_quick_push (tree, label_vec, u);
4838
4839 si = gsi_last_bb (s_entry_bb);
4840 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4841 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4842 gsi_remove (&si, true);
4843 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4844
4845 if (s_exit_bb == NULL)
4846 continue;
4847
4848 si = gsi_last_bb (s_exit_bb);
4849 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4850 gsi_remove (&si, true);
4851
4852 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4853 }
4854
4855 /* Error handling code goes in DEFAULT_BB. */
4856 t = gimple_block_label (default_bb);
4857 u = build_case_label (NULL, NULL, t);
4858 make_edge (l0_bb, default_bb, 0);
4859
4860 stmt = gimple_build_switch_vec (vmain, u, label_vec);
4861 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4862 gsi_remove (&switch_si, true);
4863 VEC_free (tree, heap, label_vec);
4864
4865 si = gsi_start_bb (default_bb);
4866 stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
4867 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4868
4869 if (exit_reachable)
4870 {
4871 /* Code to get the next section goes in L1_BB. */
4872 si = gsi_last_bb (l1_bb);
4873 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4874
4875 stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
4876 gimple_call_set_lhs (stmt, vnext);
4877 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4878 gsi_remove (&si, true);
4879
4880 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4881
4882 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
4883 si = gsi_last_bb (l2_bb);
4884 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4885 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
4886 else
4887 t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
4888 stmt = gimple_build_call (t, 0);
4889 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4890 gsi_remove (&si, true);
4891 }
4892
4893 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4894 }
4895
4896
4897 /* Expand code for an OpenMP single directive. We've already expanded
4898 much of the code; here we simply place the GOMP_barrier call. */
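
/* For example (illustrative only), for

     #pragma omp single copyprivate (x)
     x = init ();

   the value of x produced by the one thread that executes the block is
   broadcast to the other threads at the closing barrier, so that
   barrier must stay even if the GIMPLE_OMP_RETURN is marked nowait;
   this is the need_barrier case handled below.  init is a placeholder.  */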
4899
4900 static void
4901 expand_omp_single (struct omp_region *region)
4902 {
4903 basic_block entry_bb, exit_bb;
4904 gimple_stmt_iterator si;
4905 bool need_barrier = false;
4906
4907 entry_bb = region->entry;
4908 exit_bb = region->exit;
4909
4910 si = gsi_last_bb (entry_bb);
4911 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4912 be removed. We need to ensure that the thread that entered the single
4913 does not exit before the data is copied out by the other threads. */
4914 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4915 OMP_CLAUSE_COPYPRIVATE))
4916 need_barrier = true;
4917 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4918 gsi_remove (&si, true);
4919 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4920
4921 si = gsi_last_bb (exit_bb);
4922 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4923 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4924 false, GSI_SAME_STMT);
4925 gsi_remove (&si, true);
4926 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4927 }
4928
4929
4930 /* Generic expansion for OpenMP synchronization directives: master,
4931 ordered and critical. All we need to do here is remove the entry
4932 and exit markers for REGION. */
4933
4934 static void
4935 expand_omp_synch (struct omp_region *region)
4936 {
4937 basic_block entry_bb, exit_bb;
4938 gimple_stmt_iterator si;
4939
4940 entry_bb = region->entry;
4941 exit_bb = region->exit;
4942
4943 si = gsi_last_bb (entry_bb);
4944 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4945 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4946 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4947 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4948 gsi_remove (&si, true);
4949 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4950
4951 if (exit_bb)
4952 {
4953 si = gsi_last_bb (exit_bb);
4954 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4955 gsi_remove (&si, true);
4956 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4957 }
4958 }
4959
4960 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
4961 operation as a __sync_fetch_and_op builtin. INDEX is log2 of the
4962 size of the data type, and thus usable to find the index of the builtin
4963 decl. Returns false if the expression is not of the proper form. */
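
/* For example (illustrative only),

     #pragma omp atomic
     x += 3;

   matches the PLUS_EXPR case below and, when the target provides the
   builtin and the operand sizes line up, is emitted as a single call
   along the lines of __sync_fetch_and_add (&x, 3), avoiding the
   compare-and-swap loop of expand_omp_atomic_pipeline.  */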
4964
4965 static bool
4966 expand_omp_atomic_fetch_op (basic_block load_bb,
4967 tree addr, tree loaded_val,
4968 tree stored_val, int index)
4969 {
4970 enum built_in_function base;
4971 tree decl, itype, call;
4972 direct_optab optab;
4973 tree rhs;
4974 basic_block store_bb = single_succ (load_bb);
4975 gimple_stmt_iterator gsi;
4976 gimple stmt;
4977 location_t loc;
4978
4979 /* We expect to find the following sequences:
4980
4981 load_bb:
4982 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
4983
4984 store_bb:
4985 val = tmp OP something; (or: something OP tmp)
4986 GIMPLE_OMP_STORE (val)
4987
4988 ???FIXME: Allow a more flexible sequence.
4989 Perhaps use data flow to pick the statements.
4990
4991 */
4992
4993 gsi = gsi_after_labels (store_bb);
4994 stmt = gsi_stmt (gsi);
4995 loc = gimple_location (stmt);
4996 if (!is_gimple_assign (stmt))
4997 return false;
4998 gsi_next (&gsi);
4999 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
5000 return false;
5001
5002 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
5003 return false;
5004
5005 /* Check for one of the supported fetch-op operations. */
5006 switch (gimple_assign_rhs_code (stmt))
5007 {
5008 case PLUS_EXPR:
5009 case POINTER_PLUS_EXPR:
5010 base = BUILT_IN_SYNC_FETCH_AND_ADD_N;
5011 optab = sync_add_optab;
5012 break;
5013 case MINUS_EXPR:
5014 base = BUILT_IN_SYNC_FETCH_AND_SUB_N;
5015 optab = sync_add_optab;
5016 break;
5017 case BIT_AND_EXPR:
5018 base = BUILT_IN_SYNC_FETCH_AND_AND_N;
5019 optab = sync_and_optab;
5020 break;
5021 case BIT_IOR_EXPR:
5022 base = BUILT_IN_SYNC_FETCH_AND_OR_N;
5023 optab = sync_ior_optab;
5024 break;
5025 case BIT_XOR_EXPR:
5026 base = BUILT_IN_SYNC_FETCH_AND_XOR_N;
5027 optab = sync_xor_optab;
5028 break;
5029 default:
5030 return false;
5031 }
5032 /* Make sure the expression is of the proper form. */
5033 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5034 rhs = gimple_assign_rhs2 (stmt);
5035 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5036 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5037 rhs = gimple_assign_rhs1 (stmt);
5038 else
5039 return false;
5040
5041 decl = built_in_decls[base + index + 1];
5042 if (decl == NULL_TREE)
5043 return false;
5044 itype = TREE_TYPE (TREE_TYPE (decl));
5045
5046 if (direct_optab_handler (optab, TYPE_MODE (itype)) == CODE_FOR_nothing)
5047 return false;
5048
5049 gsi = gsi_last_bb (load_bb);
5050 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5051 call = build_call_expr_loc (loc,
5052 decl, 2, addr,
5053 fold_convert_loc (loc, itype, rhs));
5054 call = fold_convert_loc (loc, void_type_node, call);
5055 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5056 gsi_remove (&gsi, true);
5057
5058 gsi = gsi_last_bb (store_bb);
5059 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5060 gsi_remove (&gsi, true);
5061 gsi = gsi_last_bb (store_bb);
5062 gsi_remove (&gsi, true);
5063
5064 if (gimple_in_ssa_p (cfun))
5065 update_ssa (TODO_update_ssa_no_phi);
5066
5067 return true;
5068 }
5069
5070 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5071
5072 oldval = *addr;
5073 repeat:
5074 newval = rhs; // with oldval replacing *addr in rhs
5075 prev = __sync_val_compare_and_swap (addr, oldval, newval);
5076 if (prev != oldval)
5077 { oldval = prev; goto repeat; }
5078
5079 INDEX is log2 of the size of the data type, and thus usable to find the
5080 index of the builtin decl. */
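
/* For example (illustrative only), for

     #pragma omp atomic
     d *= 2.0;

   with a double d there is no fetch-and-multiply builtin, so the update
   is view-converted to a same-sized integer type and retried in the
   __sync_val_compare_and_swap loop above until the swap sees an
   unchanged value.  */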
5081
5082 static bool
5083 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5084 tree addr, tree loaded_val, tree stored_val,
5085 int index)
5086 {
5087 tree loadedi, storedi, initial, new_storedi, old_vali;
5088 tree type, itype, cmpxchg, iaddr;
5089 gimple_stmt_iterator si;
5090 basic_block loop_header = single_succ (load_bb);
5091 gimple phi, stmt;
5092 edge e;
5093
5094 cmpxchg = built_in_decls[BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N + index + 1];
5095 if (cmpxchg == NULL_TREE)
5096 return false;
5097 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5098 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5099
5100 if (direct_optab_handler (sync_compare_and_swap_optab, TYPE_MODE (itype))
5101 == CODE_FOR_nothing)
5102 return false;
5103
5104 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
5105 si = gsi_last_bb (load_bb);
5106 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5107
5108 /* For floating-point values, we'll need to view-convert them to integers
5109 so that we can perform the atomic compare and swap. Simplify the
5110 following code by always setting up the "i"ntegral variables. */
5111 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5112 {
5113 tree iaddr_val;
5114
5115 iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5116 true), NULL);
5117 iaddr_val
5118 = force_gimple_operand_gsi (&si,
5119 fold_convert (TREE_TYPE (iaddr), addr),
5120 false, NULL_TREE, true, GSI_SAME_STMT);
5121 stmt = gimple_build_assign (iaddr, iaddr_val);
5122 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5123 loadedi = create_tmp_var (itype, NULL);
5124 if (gimple_in_ssa_p (cfun))
5125 {
5126 add_referenced_var (iaddr);
5127 add_referenced_var (loadedi);
5128 loadedi = make_ssa_name (loadedi, NULL);
5129 }
5130 }
5131 else
5132 {
5133 iaddr = addr;
5134 loadedi = loaded_val;
5135 }
5136
5137 initial
5138 = force_gimple_operand_gsi (&si,
5139 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5140 iaddr,
5141 build_int_cst (TREE_TYPE (iaddr), 0)),
5142 true, NULL_TREE, true, GSI_SAME_STMT);
5143
5144 /* Move the value to the LOADEDI temporary. */
5145 if (gimple_in_ssa_p (cfun))
5146 {
5147 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5148 phi = create_phi_node (loadedi, loop_header);
5149 SSA_NAME_DEF_STMT (loadedi) = phi;
5150 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5151 initial);
5152 }
5153 else
5154 gsi_insert_before (&si,
5155 gimple_build_assign (loadedi, initial),
5156 GSI_SAME_STMT);
5157 if (loadedi != loaded_val)
5158 {
5159 gimple_stmt_iterator gsi2;
5160 tree x;
5161
5162 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5163 gsi2 = gsi_start_bb (loop_header);
5164 if (gimple_in_ssa_p (cfun))
5165 {
5166 gimple stmt;
5167 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5168 true, GSI_SAME_STMT);
5169 stmt = gimple_build_assign (loaded_val, x);
5170 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5171 }
5172 else
5173 {
5174 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5175 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5176 true, GSI_SAME_STMT);
5177 }
5178 }
5179 gsi_remove (&si, true);
5180
5181 si = gsi_last_bb (store_bb);
5182 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5183
5184 if (iaddr == addr)
5185 storedi = stored_val;
5186 else
5187 storedi =
5188 force_gimple_operand_gsi (&si,
5189 build1 (VIEW_CONVERT_EXPR, itype,
5190 stored_val), true, NULL_TREE, true,
5191 GSI_SAME_STMT);
5192
5193 /* Build the compare&swap statement. */
5194 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5195 new_storedi = force_gimple_operand_gsi (&si,
5196 fold_convert (TREE_TYPE (loadedi),
5197 new_storedi),
5198 true, NULL_TREE,
5199 true, GSI_SAME_STMT);
5200
5201 if (gimple_in_ssa_p (cfun))
5202 old_vali = loadedi;
5203 else
5204 {
5205 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5206 if (gimple_in_ssa_p (cfun))
5207 add_referenced_var (old_vali);
5208 stmt = gimple_build_assign (old_vali, loadedi);
5209 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5210
5211 stmt = gimple_build_assign (loadedi, new_storedi);
5212 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5213 }
5214
5215 /* Note that we always perform the comparison as an integer, even for
5216 floating point. This allows the atomic operation to properly
5217 succeed even with NaNs and -0.0. */
5218 stmt = gimple_build_cond_empty
5219 (build2 (NE_EXPR, boolean_type_node,
5220 new_storedi, old_vali));
5221 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5222
5223 /* Update cfg. */
5224 e = single_succ_edge (store_bb);
5225 e->flags &= ~EDGE_FALLTHRU;
5226 e->flags |= EDGE_FALSE_VALUE;
5227
5228 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5229
5230 /* Copy the new value to loadedi (we already did that before the condition
5231 if we are not in SSA). */
5232 if (gimple_in_ssa_p (cfun))
5233 {
5234 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5235 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5236 }
5237
5238 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
5239 gsi_remove (&si, true);
5240
5241 if (gimple_in_ssa_p (cfun))
5242 update_ssa (TODO_update_ssa_no_phi);
5243
5244 return true;
5245 }
5246
5247 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
5248
5249 GOMP_atomic_start ();
5250 *addr = rhs;
5251 GOMP_atomic_end ();
5252
5253 The result is not globally atomic, but works so long as all parallel
5254 references are within #pragma omp atomic directives. According to
5255 responses received from omp@openmp.org, this appears to be within
5256 spec, which makes sense, since that's how several other compilers
5257 handle this situation as well.
5258 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5259 expanding. STORED_VAL is the operand of the matching
5260 GIMPLE_OMP_ATOMIC_STORE.
5261
5262 We replace
5263 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5264 loaded_val = *addr;
5265
5266 and replace
5267 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
5268 *addr = stored_val;
5269 */
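
/* For example (illustrative only), an atomic update of a long double
   typically does not map onto any of the __sync builtins, so it ends
   up here and is simply bracketed by GOMP_atomic_start () and
   GOMP_atomic_end (), i.e. serialized on the global lock libgomp keeps
   for this purpose.  */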
5270
5271 static bool
5272 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5273 tree addr, tree loaded_val, tree stored_val)
5274 {
5275 gimple_stmt_iterator si;
5276 gimple stmt;
5277 tree t;
5278
5279 si = gsi_last_bb (load_bb);
5280 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5281
5282 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
5283 t = build_call_expr (t, 0);
5284 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5285
5286 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5287 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5288 gsi_remove (&si, true);
5289
5290 si = gsi_last_bb (store_bb);
5291 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5292
5293 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5294 stored_val);
5295 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5296
5297 t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
5298 t = build_call_expr (t, 0);
5299 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5300 gsi_remove (&si, true);
5301
5302 if (gimple_in_ssa_p (cfun))
5303 update_ssa (TODO_update_ssa_no_phi);
5304 return true;
5305 }
5306
5307 /* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand
5308 using expand_omp_atomic_fetch_op. If it failed, we try to
5309 call expand_omp_atomic_pipeline, and if it fails too, the
5310 ultimate fallback is wrapping the operation in a mutex
5311 (expand_omp_atomic_mutex). REGION is the atomic region built
5312 by build_omp_regions_1(). */
5313
5314 static void
5315 expand_omp_atomic (struct omp_region *region)
5316 {
5317 basic_block load_bb = region->entry, store_bb = region->exit;
5318 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5319 tree loaded_val = gimple_omp_atomic_load_lhs (load);
5320 tree addr = gimple_omp_atomic_load_rhs (load);
5321 tree stored_val = gimple_omp_atomic_store_val (store);
5322 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5323 HOST_WIDE_INT index;
5324
5325 /* Make sure the type is one of the supported sizes. */
5326 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5327 index = exact_log2 (index);
5328 if (index >= 0 && index <= 4)
5329 {
5330 unsigned int align = TYPE_ALIGN_UNIT (type);
5331
5332 /* __sync builtins require strict data alignment. */
5333 if (exact_log2 (align) >= index)
5334 {
5335 /* When possible, use specialized atomic update functions. */
5336 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5337 && store_bb == single_succ (load_bb))
5338 {
5339 if (expand_omp_atomic_fetch_op (load_bb, addr,
5340 loaded_val, stored_val, index))
5341 return;
5342 }
5343
5344 /* If we don't have specialized __sync builtins, try to implement it
5345 as a compare and swap loop. */
5346 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5347 loaded_val, stored_val, index))
5348 return;
5349 }
5350 }
5351
5352 /* The ultimate fallback is wrapping the operation in a mutex. */
5353 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5354 }
5355
5356
5357 /* Expand the parallel region tree rooted at REGION. Expansion
5358 proceeds in depth-first order. Innermost regions are expanded
5359 first. This way, parallel regions that require a new function to
5360 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5361 internal dependencies in their body. */
5362
5363 static void
5364 expand_omp (struct omp_region *region)
5365 {
5366 while (region)
5367 {
5368 location_t saved_location;
5369
5370 /* First, determine whether this is a combined parallel+workshare
5371 region. */
5372 if (region->type == GIMPLE_OMP_PARALLEL)
5373 determine_parallel_type (region);
5374
5375 if (region->inner)
5376 expand_omp (region->inner);
5377
5378 saved_location = input_location;
5379 if (gimple_has_location (last_stmt (region->entry)))
5380 input_location = gimple_location (last_stmt (region->entry));
5381
5382 switch (region->type)
5383 {
5384 case GIMPLE_OMP_PARALLEL:
5385 case GIMPLE_OMP_TASK:
5386 expand_omp_taskreg (region);
5387 break;
5388
5389 case GIMPLE_OMP_FOR:
5390 expand_omp_for (region);
5391 break;
5392
5393 case GIMPLE_OMP_SECTIONS:
5394 expand_omp_sections (region);
5395 break;
5396
5397 case GIMPLE_OMP_SECTION:
5398 /* Individual omp sections are handled together with their
5399 parent GIMPLE_OMP_SECTIONS region. */
5400 break;
5401
5402 case GIMPLE_OMP_SINGLE:
5403 expand_omp_single (region);
5404 break;
5405
5406 case GIMPLE_OMP_MASTER:
5407 case GIMPLE_OMP_ORDERED:
5408 case GIMPLE_OMP_CRITICAL:
5409 expand_omp_synch (region);
5410 break;
5411
5412 case GIMPLE_OMP_ATOMIC_LOAD:
5413 expand_omp_atomic (region);
5414 break;
5415
5416 default:
5417 gcc_unreachable ();
5418 }
5419
5420 input_location = saved_location;
5421 region = region->next;
5422 }
5423 }
5424
5425
5426 /* Helper for build_omp_regions. Scan the dominator tree starting at
5427 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
5428 true, the function ends once a single tree is built (otherwise, the
5429 whole forest of OMP constructs may be built). */
5430
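/* A sketch of the region tree built for

       #pragma omp parallel          <-- region A
       {
         #pragma omp for             <-- region B, inner child of A
           ...
         #pragma omp single          <-- region C, sibling of B
           ...
       }

   Each GIMPLE_OMP_RETURN (or GIMPLE_OMP_ATOMIC_STORE) closes the
   innermost open region, and GIMPLE_OMP_CONTINUE records the
   continuation block of the current region.  */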
5431 static void
5432 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5433 bool single_tree)
5434 {
5435 gimple_stmt_iterator gsi;
5436 gimple stmt;
5437 basic_block son;
5438
5439 gsi = gsi_last_bb (bb);
5440 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5441 {
5442 struct omp_region *region;
5443 enum gimple_code code;
5444
5445 stmt = gsi_stmt (gsi);
5446 code = gimple_code (stmt);
5447 if (code == GIMPLE_OMP_RETURN)
5448 {
5449 /* STMT is the return point out of region PARENT. Mark it
5450 as the exit point and make PARENT the immediately
5451 enclosing region. */
5452 gcc_assert (parent);
5453 region = parent;
5454 region->exit = bb;
5455 parent = parent->outer;
5456 }
5457 else if (code == GIMPLE_OMP_ATOMIC_STORE)
5458 {
5459 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5460 GIMPLE_OMP_RETURN, but is matched with
5461 GIMPLE_OMP_ATOMIC_LOAD. */
5462 gcc_assert (parent);
5463 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5464 region = parent;
5465 region->exit = bb;
5466 parent = parent->outer;
5467 }
5468
5469 else if (code == GIMPLE_OMP_CONTINUE)
5470 {
5471 gcc_assert (parent);
5472 parent->cont = bb;
5473 }
5474 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5475 {
5476 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5477 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
5478 ;
5479 }
5480 else
5481 {
5482 /* Otherwise, this directive becomes the parent for a new
5483 region. */
5484 region = new_omp_region (bb, code, parent);
5485 parent = region;
5486 }
5487 }
5488
5489 if (single_tree && !parent)
5490 return;
5491
5492 for (son = first_dom_son (CDI_DOMINATORS, bb);
5493 son;
5494 son = next_dom_son (CDI_DOMINATORS, son))
5495 build_omp_regions_1 (son, parent, single_tree);
5496 }
5497
5498 /* Builds the tree of OMP regions rooted at ROOT, storing it to
5499 root_omp_region. */
5500
5501 static void
5502 build_omp_regions_root (basic_block root)
5503 {
5504 gcc_assert (root_omp_region == NULL);
5505 build_omp_regions_1 (root, NULL, true);
5506 gcc_assert (root_omp_region != NULL);
5507 }
5508
5509 /* Expand the OMP construct (and its subconstructs) starting in HEAD. */
5510
5511 void
5512 omp_expand_local (basic_block head)
5513 {
5514 build_omp_regions_root (head);
5515 if (dump_file && (dump_flags & TDF_DETAILS))
5516 {
5517 fprintf (dump_file, "\nOMP region tree\n\n");
5518 dump_omp_region (dump_file, root_omp_region, 0);
5519 fprintf (dump_file, "\n");
5520 }
5521
5522 remove_exit_barriers (root_omp_region);
5523 expand_omp (root_omp_region);
5524
5525 free_omp_regions ();
5526 }
5527
5528 /* Scan the CFG and build a tree of OMP regions. The root of the
5529 tree is stored in root_omp_region. */
5530
5531 static void
5532 build_omp_regions (void)
5533 {
5534 gcc_assert (root_omp_region == NULL);
5535 calculate_dominance_info (CDI_DOMINATORS);
5536 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5537 }
5538
5539 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
5540
5541 static unsigned int
5542 execute_expand_omp (void)
5543 {
5544 build_omp_regions ();
5545
5546 if (!root_omp_region)
5547 return 0;
5548
5549 if (dump_file)
5550 {
5551 fprintf (dump_file, "\nOMP region tree\n\n");
5552 dump_omp_region (dump_file, root_omp_region, 0);
5553 fprintf (dump_file, "\n");
5554 }
5555
5556 remove_exit_barriers (root_omp_region);
5557
5558 expand_omp (root_omp_region);
5559
5560 cleanup_tree_cfg ();
5561
5562 free_omp_regions ();
5563
5564 return 0;
5565 }
5566
5567 /* OMP expansion -- the default pass, run before creation of SSA form. */
5568
5569 static bool
5570 gate_expand_omp (void)
5571 {
5572 return (flag_openmp != 0 && !seen_error ());
5573 }
5574
5575 struct gimple_opt_pass pass_expand_omp =
5576 {
5577 {
5578 GIMPLE_PASS,
5579 "ompexp", /* name */
5580 gate_expand_omp, /* gate */
5581 execute_expand_omp, /* execute */
5582 NULL, /* sub */
5583 NULL, /* next */
5584 0, /* static_pass_number */
5585 TV_NONE, /* tv_id */
5586 PROP_gimple_any, /* properties_required */
5587 0, /* properties_provided */
5588 0, /* properties_destroyed */
5589 0, /* todo_flags_start */
5590 0 /* todo_flags_finish */
5591 }
5592 };
5593 \f
5594 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
5595
5596 /* Lower the OpenMP sections directive in the current statement in GSI_P.
5597 CTX is the enclosing OMP context for the current statement. */
5598
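/* A rough sketch of the lowered layout (the .section control variable
   matches the temporary created below; everything else is schematic):

       #pragma omp sections
       { #pragma omp section  S1;
         #pragma omp section  S2; }

   becomes approximately

       <input/firstprivate setup>
       GIMPLE_OMP_SECTIONS <clauses, control .section>
       GIMPLE_OMP_SECTIONS_SWITCH
       GIMPLE_OMP_SECTION  S1;  GIMPLE_OMP_RETURN
       GIMPLE_OMP_SECTION  S2;  <lastprivate code>  GIMPLE_OMP_RETURN
       GIMPLE_OMP_CONTINUE (.section, .section)
       <reduction and destructor code>
       GIMPLE_OMP_RETURN [nowait]

   which pass_expand_omp later turns into calls to the GOMP sections
   runtime entry points.  */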
5599 static void
5600 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5601 {
5602 tree block, control;
5603 gimple_stmt_iterator tgsi;
5604 unsigned i, len;
5605 gimple stmt, new_stmt, bind, t;
5606 gimple_seq ilist, dlist, olist, new_body, body;
5607 struct gimplify_ctx gctx;
5608
5609 stmt = gsi_stmt (*gsi_p);
5610
5611 push_gimplify_context (&gctx);
5612
5613 dlist = NULL;
5614 ilist = NULL;
5615 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5616 &ilist, &dlist, ctx);
5617
5618 tgsi = gsi_start (gimple_omp_body (stmt));
5619 for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5620 continue;
5621
5622 tgsi = gsi_start (gimple_omp_body (stmt));
5623 body = NULL;
5624 for (i = 0; i < len; i++, gsi_next (&tgsi))
5625 {
5626 omp_context *sctx;
5627 gimple sec_start;
5628
5629 sec_start = gsi_stmt (tgsi);
5630 sctx = maybe_lookup_ctx (sec_start);
5631 gcc_assert (sctx);
5632
5633 gimple_seq_add_stmt (&body, sec_start);
5634
5635 lower_omp (gimple_omp_body (sec_start), sctx);
5636 gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5637 gimple_omp_set_body (sec_start, NULL);
5638
5639 if (i == len - 1)
5640 {
5641 gimple_seq l = NULL;
5642 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5643 &l, ctx);
5644 gimple_seq_add_seq (&body, l);
5645 gimple_omp_section_set_last (sec_start);
5646 }
5647
5648 gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5649 }
5650
5651 block = make_node (BLOCK);
5652 bind = gimple_build_bind (NULL, body, block);
5653
5654 olist = NULL;
5655 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5656
5657 block = make_node (BLOCK);
5658 new_stmt = gimple_build_bind (NULL, NULL, block);
5659
5660 pop_gimplify_context (new_stmt);
5661 gimple_bind_append_vars (new_stmt, ctx->block_vars);
5662 BLOCK_VARS (block) = gimple_bind_vars (bind);
5663 if (BLOCK_VARS (block))
5664 TREE_USED (block) = 1;
5665
5666 new_body = NULL;
5667 gimple_seq_add_seq (&new_body, ilist);
5668 gimple_seq_add_stmt (&new_body, stmt);
5669 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5670 gimple_seq_add_stmt (&new_body, bind);
5671
5672 control = create_tmp_var (unsigned_type_node, ".section");
5673 t = gimple_build_omp_continue (control, control);
5674 gimple_omp_sections_set_control (stmt, control);
5675 gimple_seq_add_stmt (&new_body, t);
5676
5677 gimple_seq_add_seq (&new_body, olist);
5678 gimple_seq_add_seq (&new_body, dlist);
5679
5680 new_body = maybe_catch_exception (new_body);
5681
5682 t = gimple_build_omp_return
5683 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5684 OMP_CLAUSE_NOWAIT));
5685 gimple_seq_add_stmt (&new_body, t);
5686
5687 gimple_bind_set_body (new_stmt, new_body);
5688 gimple_omp_set_body (stmt, NULL);
5689
5690 gsi_replace (gsi_p, new_stmt, true);
5691 }
5692
5693
5694 /* A subroutine of lower_omp_single. Expand the simple form of
5695 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5696
5697 if (GOMP_single_start ())
5698 BODY;
5699 [ GOMP_barrier (); ] -> unless 'nowait' is present.
5700
5701 FIXME. It may be better to delay expanding the logic of this until
5702 pass_expand_omp. The expanded logic may make the job more difficult
5703 for a synchronization analysis pass. */
5704
5705 static void
5706 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5707 {
5708 location_t loc = gimple_location (single_stmt);
5709 tree tlabel = create_artificial_label (loc);
5710 tree flabel = create_artificial_label (loc);
5711 gimple call, cond;
5712 tree lhs, decl;
5713
5714 decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
5715 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5716 call = gimple_build_call (decl, 0);
5717 gimple_call_set_lhs (call, lhs);
5718 gimple_seq_add_stmt (pre_p, call);
5719
5720 cond = gimple_build_cond (EQ_EXPR, lhs,
5721 fold_convert_loc (loc, TREE_TYPE (lhs),
5722 boolean_true_node),
5723 tlabel, flabel);
5724 gimple_seq_add_stmt (pre_p, cond);
5725 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5726 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5727 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5728 }
5729
5730
5731 /* A subroutine of lower_omp_single. Expand the copyprivate form of
5732 a GIMPLE_OMP_SINGLE, i.e. one with a copyprivate clause:
5733
5734 #pragma omp single copyprivate (a, b, c)
5735
5736 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5737
5738 {
5739 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5740 {
5741 BODY;
5742 copyout.a = a;
5743 copyout.b = b;
5744 copyout.c = c;
5745 GOMP_single_copy_end (&copyout);
5746 }
5747 else
5748 {
5749 a = copyout_p->a;
5750 b = copyout_p->b;
5751 c = copyout_p->c;
5752 }
5753 GOMP_barrier ();
5754 }
5755
5756 FIXME. It may be better to delay expanding the logic of this until
5757 pass_expand_omp. The expanded logic may make the job more difficult
5758 for a synchronization analysis pass. */
5759
5760 static void
5761 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5762 {
5763 tree ptr_type, t, l0, l1, l2;
5764 gimple_seq copyin_seq;
5765 location_t loc = gimple_location (single_stmt);
5766
5767 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5768
5769 ptr_type = build_pointer_type (ctx->record_type);
5770 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5771
5772 l0 = create_artificial_label (loc);
5773 l1 = create_artificial_label (loc);
5774 l2 = create_artificial_label (loc);
5775
5776 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
5777 t = fold_convert_loc (loc, ptr_type, t);
5778 gimplify_assign (ctx->receiver_decl, t, pre_p);
5779
5780 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5781 build_int_cst (ptr_type, 0));
5782 t = build3 (COND_EXPR, void_type_node, t,
5783 build_and_jump (&l0), build_and_jump (&l1));
5784 gimplify_and_add (t, pre_p);
5785
5786 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5787
5788 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5789
5790 copyin_seq = NULL;
5791 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5792 &copyin_seq, ctx);
5793
5794 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5795 t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
5796 1, t);
5797 gimplify_and_add (t, pre_p);
5798
5799 t = build_and_jump (&l2);
5800 gimplify_and_add (t, pre_p);
5801
5802 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5803
5804 gimple_seq_add_seq (pre_p, copyin_seq);
5805
5806 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
5807 }
5808
5809
5810 /* Expand code for an OpenMP single directive. */
5811
5812 static void
5813 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5814 {
5815 tree block;
5816 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
5817 gimple_seq bind_body, dlist;
5818 struct gimplify_ctx gctx;
5819
5820 push_gimplify_context (&gctx);
5821
5822 bind_body = NULL;
5823 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
5824 &bind_body, &dlist, ctx);
5825 lower_omp (gimple_omp_body (single_stmt), ctx);
5826
5827 gimple_seq_add_stmt (&bind_body, single_stmt);
5828
5829 if (ctx->record_type)
5830 lower_omp_single_copy (single_stmt, &bind_body, ctx);
5831 else
5832 lower_omp_single_simple (single_stmt, &bind_body);
5833
5834 gimple_omp_set_body (single_stmt, NULL);
5835
5836 gimple_seq_add_seq (&bind_body, dlist);
5837
5838 bind_body = maybe_catch_exception (bind_body);
5839
5840 t = gimple_build_omp_return
5841 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
5842 OMP_CLAUSE_NOWAIT));
5843 gimple_seq_add_stmt (&bind_body, t);
5844
5845 block = make_node (BLOCK);
5846 bind = gimple_build_bind (NULL, bind_body, block);
5847
5848 pop_gimplify_context (bind);
5849
5850 gimple_bind_append_vars (bind, ctx->block_vars);
5851 BLOCK_VARS (block) = ctx->block_vars;
5852 gsi_replace (gsi_p, bind, true);
5853 if (BLOCK_VARS (block))
5854 TREE_USED (block) = 1;
5855 }
5856
5857
5858 /* Expand code for an OpenMP master directive. */
5859
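/* A minimal sketch of the expansion:

       if (omp_get_thread_num () == 0)
         <master body>
     lab:
       GIMPLE_OMP_RETURN [nowait]

   i.e. the body is guarded by a thread-number check; the master
   construct itself implies no barrier.  */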
5860 static void
5861 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5862 {
5863 tree block, lab = NULL, x;
5864 gimple stmt = gsi_stmt (*gsi_p), bind;
5865 location_t loc = gimple_location (stmt);
5866 gimple_seq tseq;
5867 struct gimplify_ctx gctx;
5868
5869 push_gimplify_context (&gctx);
5870
5871 block = make_node (BLOCK);
5872 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5873 block);
5874
5875 x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
5876 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
5877 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
5878 tseq = NULL;
5879 gimplify_and_add (x, &tseq);
5880 gimple_bind_add_seq (bind, tseq);
5881
5882 lower_omp (gimple_omp_body (stmt), ctx);
5883 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5884 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5885 gimple_omp_set_body (stmt, NULL);
5886
5887 gimple_bind_add_stmt (bind, gimple_build_label (lab));
5888
5889 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5890
5891 pop_gimplify_context (bind);
5892
5893 gimple_bind_append_vars (bind, ctx->block_vars);
5894 BLOCK_VARS (block) = ctx->block_vars;
5895 gsi_replace (gsi_p, bind, true);
5896 }
5897
5898
5899 /* Expand code for an OpenMP ordered directive. */
5900
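/* A minimal sketch of the expansion:

       GOMP_ordered_start ();
       <ordered body>
       GOMP_ordered_end ();
       GIMPLE_OMP_RETURN [nowait]

   where GOMP_ordered_start blocks until it is the current iteration's
   turn in the enclosing ordered loop.  */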
5901 static void
5902 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5903 {
5904 tree block;
5905 gimple stmt = gsi_stmt (*gsi_p), bind, x;
5906 struct gimplify_ctx gctx;
5907
5908 push_gimplify_context (&gctx);
5909
5910 block = make_node (BLOCK);
5911 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
5912 block);
5913
5914 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
5915 gimple_bind_add_stmt (bind, x);
5916
5917 lower_omp (gimple_omp_body (stmt), ctx);
5918 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
5919 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
5920 gimple_omp_set_body (stmt, NULL);
5921
5922 x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
5923 gimple_bind_add_stmt (bind, x);
5924
5925 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
5926
5927 pop_gimplify_context (bind);
5928
5929 gimple_bind_append_vars (bind, ctx->block_vars);
5930 BLOCK_VARS (block) = gimple_bind_vars (bind);
5931 gsi_replace (gsi_p, bind, true);
5932 }
5933
5934
5935 /* Lower a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
5936 substitution of a couple of function calls. But in the NAMED case,
5937 it requires that languages coordinate a symbol name. It is therefore
5938 best put here in common code. */
5939
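/* A rough sketch of the two expansions (the mutex name follows the
   .gomp_critical_user_ convention used below):

       #pragma omp critical                #pragma omp critical (foo)
       S;                                  S;

   become, respectively,

       GOMP_critical_start ();             GOMP_critical_name_start (&.gomp_critical_user_foo);
       S;                                  S;
       GOMP_critical_end ();               GOMP_critical_name_end (&.gomp_critical_user_foo);  */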
5940 static GTY((param1_is (tree), param2_is (tree)))
5941 splay_tree critical_name_mutexes;
5942
5943 static void
5944 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5945 {
5946 tree block;
5947 tree name, lock, unlock;
5948 gimple stmt = gsi_stmt (*gsi_p), bind;
5949 location_t loc = gimple_location (stmt);
5950 gimple_seq tbody;
5951 struct gimplify_ctx gctx;
5952
5953 name = gimple_omp_critical_name (stmt);
5954 if (name)
5955 {
5956 tree decl;
5957 splay_tree_node n;
5958
5959 if (!critical_name_mutexes)
5960 critical_name_mutexes
5961 = splay_tree_new_ggc (splay_tree_compare_pointers,
5962 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
5963 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
5964
5965 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
5966 if (n == NULL)
5967 {
5968 char *new_str;
5969
5970 decl = create_tmp_var_raw (ptr_type_node, NULL);
5971
5972 new_str = ACONCAT ((".gomp_critical_user_",
5973 IDENTIFIER_POINTER (name), NULL));
5974 DECL_NAME (decl) = get_identifier (new_str);
5975 TREE_PUBLIC (decl) = 1;
5976 TREE_STATIC (decl) = 1;
5977 DECL_COMMON (decl) = 1;
5978 DECL_ARTIFICIAL (decl) = 1;
5979 DECL_IGNORED_P (decl) = 1;
5980 varpool_finalize_decl (decl);
5981
5982 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
5983 (splay_tree_value) decl);
5984 }
5985 else
5986 decl = (tree) n->value;
5987
5988 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
5989 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
5990
5991 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
5992 unlock = build_call_expr_loc (loc, unlock, 1,
5993 build_fold_addr_expr_loc (loc, decl));
5994 }
5995 else
5996 {
5997 lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
5998 lock = build_call_expr_loc (loc, lock, 0);
5999
6000 unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
6001 unlock = build_call_expr_loc (loc, unlock, 0);
6002 }
6003
6004 push_gimplify_context (&gctx);
6005
6006 block = make_node (BLOCK);
6007 bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
6008
6009 tbody = gimple_bind_body (bind);
6010 gimplify_and_add (lock, &tbody);
6011 gimple_bind_set_body (bind, tbody);
6012
6013 lower_omp (gimple_omp_body (stmt), ctx);
6014 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6015 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6016 gimple_omp_set_body (stmt, NULL);
6017
6018 tbody = gimple_bind_body (bind);
6019 gimplify_and_add (unlock, &tbody);
6020 gimple_bind_set_body (bind, tbody);
6021
6022 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6023
6024 pop_gimplify_context (bind);
6025 gimple_bind_append_vars (bind, ctx->block_vars);
6026 BLOCK_VARS (block) = gimple_bind_vars (bind);
6027 gsi_replace (gsi_p, bind, true);
6028 }
6029
6030
6031 /* A subroutine of lower_omp_for. Generate code to emit the predicate
6032 for a lastprivate clause. Given a loop control predicate of (V
6033 cond N2), we gate the clause on (!(V cond N2)). The lowered form
6034 is appended to *DLIST, iterator initialization is appended to
6035 *BODY_P. */
6036
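/* For example, a sketch assuming a unit step (so the strict equality form
   is chosen):

       #pragma omp for lastprivate (x)
       for (i = 0; i < n; i++) ...

   gets, roughly,

       i = 0;                     <-- appended to *BODY_P, so a thread that
                                      runs no iterations fails the test below
       <work-shared loop>
       if (i == n)                <-- appended to *DLIST
         x = x.private;           <-- illustrative name for the private copy
  */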
6037 static void
6038 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6039 gimple_seq *dlist, struct omp_context *ctx)
6040 {
6041 tree clauses, cond, vinit;
6042 enum tree_code cond_code;
6043 gimple_seq stmts;
6044
6045 cond_code = fd->loop.cond_code;
6046 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6047
6048 /* When possible, use a strict equality expression. This can let VRP
6049 type optimizations deduce the value and remove a copy. */
6050 if (host_integerp (fd->loop.step, 0))
6051 {
6052 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6053 if (step == 1 || step == -1)
6054 cond_code = EQ_EXPR;
6055 }
6056
6057 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6058
6059 clauses = gimple_omp_for_clauses (fd->for_stmt);
6060 stmts = NULL;
6061 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6062 if (!gimple_seq_empty_p (stmts))
6063 {
6064 gimple_seq_add_seq (&stmts, *dlist);
6065 *dlist = stmts;
6066
6067 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
6068 vinit = fd->loop.n1;
6069 if (cond_code == EQ_EXPR
6070 && host_integerp (fd->loop.n2, 0)
6071 && ! integer_zerop (fd->loop.n2))
6072 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6073
6074 /* Initialize the iterator variable, so that threads that don't execute
6075 any iterations don't execute the lastprivate clauses by accident. */
6076 gimplify_assign (fd->loop.v, vinit, body_p);
6077 }
6078 }
6079
6080
6081 /* Lower code for an OpenMP loop directive. */
6082
6083 static void
6084 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6085 {
6086 tree *rhs_p, block;
6087 struct omp_for_data fd;
6088 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6089 gimple_seq omp_for_body, body, dlist;
6090 size_t i;
6091 struct gimplify_ctx gctx;
6092
6093 push_gimplify_context (&gctx);
6094
6095 lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6096 lower_omp (gimple_omp_body (stmt), ctx);
6097
6098 block = make_node (BLOCK);
6099 new_stmt = gimple_build_bind (NULL, NULL, block);
6100
6101 /* Move the declarations of temporaries out of the loop body before we
6102 make it go away. */
6103 omp_for_body = gimple_omp_body (stmt);
6104 if (!gimple_seq_empty_p (omp_for_body)
6105 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6106 {
6107 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6108 gimple_bind_append_vars (new_stmt, vars);
6109 }
6110
6111 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
6112 dlist = NULL;
6113 body = NULL;
6114 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6115 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6116
6117 /* Lower the header expressions. At this point, we can assume that
6118 the header is of the form:
6119
6120 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6121
6122 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6123 using the .omp_data_s mapping, if needed. */
6124 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6125 {
6126 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6127 if (!is_gimple_min_invariant (*rhs_p))
6128 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6129
6130 rhs_p = gimple_omp_for_final_ptr (stmt, i);
6131 if (!is_gimple_min_invariant (*rhs_p))
6132 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6133
6134 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6135 if (!is_gimple_min_invariant (*rhs_p))
6136 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6137 }
6138
6139 /* Once lowered, extract the bounds and clauses. */
6140 extract_omp_for_data (stmt, &fd, NULL);
6141
6142 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6143
6144 gimple_seq_add_stmt (&body, stmt);
6145 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6146
6147 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6148 fd.loop.v));
6149
6150 /* After the loop, add exit clauses. */
6151 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6152 gimple_seq_add_seq (&body, dlist);
6153
6154 body = maybe_catch_exception (body);
6155
6156 /* Region exit marker goes at the end of the loop body. */
6157 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6158
6159 pop_gimplify_context (new_stmt);
6160
6161 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6162 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6163 if (BLOCK_VARS (block))
6164 TREE_USED (block) = 1;
6165
6166 gimple_bind_set_body (new_stmt, body);
6167 gimple_omp_set_body (stmt, NULL);
6168 gimple_omp_for_set_pre_body (stmt, NULL);
6169 gsi_replace (gsi_p, new_stmt, true);
6170 }
6171
6172 /* Callback for walk_gimple_seq. Check whether the body of a parallel
6173 contains only a single GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS statement. */
6174
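/* For instance, in

       #pragma omp parallel
         #pragma omp for
           ...

   the walk finds exactly one GIMPLE_OMP_FOR and nothing else, so *INFO
   ends up as 1 and the parallel is marked combined; any other statement
   in the body forces *INFO to -1.  */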
6175 static tree
6176 check_combined_parallel (gimple_stmt_iterator *gsi_p,
6177 bool *handled_ops_p,
6178 struct walk_stmt_info *wi)
6179 {
6180 int *info = (int *) wi->info;
6181 gimple stmt = gsi_stmt (*gsi_p);
6182
6183 *handled_ops_p = true;
6184 switch (gimple_code (stmt))
6185 {
6186 WALK_SUBSTMTS;
6187
6188 case GIMPLE_OMP_FOR:
6189 case GIMPLE_OMP_SECTIONS:
6190 *info = *info == 0 ? 1 : -1;
6191 break;
6192 default:
6193 *info = -1;
6194 break;
6195 }
6196 return NULL;
6197 }
6198
6199 struct omp_taskcopy_context
6200 {
6201 /* This field must be at the beginning, as we do "inheritance": Some
6202 callback functions for tree-inline.c (e.g., omp_copy_decl)
6203 receive a copy_body_data pointer that is up-casted to an
6204 omp_context pointer. */
6205 copy_body_data cb;
6206 omp_context *ctx;
6207 };
6208
6209 static tree
6210 task_copyfn_copy_decl (tree var, copy_body_data *cb)
6211 {
6212 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6213
6214 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6215 return create_tmp_var (TREE_TYPE (var), NULL);
6216
6217 return var;
6218 }
6219
6220 static tree
6221 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6222 {
6223 tree name, new_fields = NULL, type, f;
6224
6225 type = lang_hooks.types.make_type (RECORD_TYPE);
6226 name = DECL_NAME (TYPE_NAME (orig_type));
6227 name = build_decl (gimple_location (tcctx->ctx->stmt),
6228 TYPE_DECL, name, type);
6229 TYPE_NAME (type) = name;
6230
6231 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6232 {
6233 tree new_f = copy_node (f);
6234 DECL_CONTEXT (new_f) = type;
6235 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6236 TREE_CHAIN (new_f) = new_fields;
6237 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6238 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6239 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6240 &tcctx->cb, NULL);
6241 new_fields = new_f;
6242 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6243 }
6244 TYPE_FIELDS (type) = nreverse (new_fields);
6245 layout_type (type);
6246 return type;
6247 }
6248
6249 /* Create the task copy function (copyfn) that initializes TASK_STMT's firstprivate data. */
6250
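/* A rough sketch of the function that is generated (struct and field
   names are illustrative):

       void task_copyfn (struct data *arg, struct sdata *sarg)
       {
         arg->shared_ptr = sarg->shared_ptr;      <-- OMP_CLAUSE_SHARED
         arg->fp = copy_ctor (sarg->fp);          <-- firstprivate copies
         arg->vla_ptr = &arg->vla_data;           <-- VLA firstprivates
       }
  */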
6251 static void
6252 create_task_copyfn (gimple task_stmt, omp_context *ctx)
6253 {
6254 struct function *child_cfun;
6255 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6256 tree record_type, srecord_type, bind, list;
6257 bool record_needs_remap = false, srecord_needs_remap = false;
6258 splay_tree_node n;
6259 struct omp_taskcopy_context tcctx;
6260 struct gimplify_ctx gctx;
6261 location_t loc = gimple_location (task_stmt);
6262
6263 child_fn = gimple_omp_task_copy_fn (task_stmt);
6264 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6265 gcc_assert (child_cfun->cfg == NULL);
6266 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6267
6268 /* Reset DECL_CONTEXT on function arguments. */
6269 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
6270 DECL_CONTEXT (t) = child_fn;
6271
6272 /* Populate the function. */
6273 push_gimplify_context (&gctx);
6274 current_function_decl = child_fn;
6275
6276 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6277 TREE_SIDE_EFFECTS (bind) = 1;
6278 list = NULL;
6279 DECL_SAVED_TREE (child_fn) = bind;
6280 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6281
6282 /* Remap src and dst argument types if needed. */
6283 record_type = ctx->record_type;
6284 srecord_type = ctx->srecord_type;
6285 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6286 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6287 {
6288 record_needs_remap = true;
6289 break;
6290 }
6291 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
6292 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6293 {
6294 srecord_needs_remap = true;
6295 break;
6296 }
6297
6298 if (record_needs_remap || srecord_needs_remap)
6299 {
6300 memset (&tcctx, '\0', sizeof (tcctx));
6301 tcctx.cb.src_fn = ctx->cb.src_fn;
6302 tcctx.cb.dst_fn = child_fn;
6303 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
6304 gcc_checking_assert (tcctx.cb.src_node);
6305 tcctx.cb.dst_node = tcctx.cb.src_node;
6306 tcctx.cb.src_cfun = ctx->cb.src_cfun;
6307 tcctx.cb.copy_decl = task_copyfn_copy_decl;
6308 tcctx.cb.eh_lp_nr = 0;
6309 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6310 tcctx.cb.decl_map = pointer_map_create ();
6311 tcctx.ctx = ctx;
6312
6313 if (record_needs_remap)
6314 record_type = task_copyfn_remap_type (&tcctx, record_type);
6315 if (srecord_needs_remap)
6316 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6317 }
6318 else
6319 tcctx.cb.decl_map = NULL;
6320
6321 push_cfun (child_cfun);
6322
6323 arg = DECL_ARGUMENTS (child_fn);
6324 TREE_TYPE (arg) = build_pointer_type (record_type);
6325 sarg = DECL_CHAIN (arg);
6326 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6327
6328 /* First pass: initialize temporaries used in record_type and srecord_type
6329 sizes and field offsets. */
6330 if (tcctx.cb.decl_map)
6331 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6332 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6333 {
6334 tree *p;
6335
6336 decl = OMP_CLAUSE_DECL (c);
6337 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6338 if (p == NULL)
6339 continue;
6340 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6341 sf = (tree) n->value;
6342 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6343 src = build_simple_mem_ref_loc (loc, sarg);
6344 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6345 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6346 append_to_statement_list (t, &list);
6347 }
6348
6349 /* Second pass: copy shared var pointers and copy-construct non-VLA
6350 firstprivate vars. */
6351 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6352 switch (OMP_CLAUSE_CODE (c))
6353 {
6354 case OMP_CLAUSE_SHARED:
6355 decl = OMP_CLAUSE_DECL (c);
6356 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6357 if (n == NULL)
6358 break;
6359 f = (tree) n->value;
6360 if (tcctx.cb.decl_map)
6361 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6362 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6363 sf = (tree) n->value;
6364 if (tcctx.cb.decl_map)
6365 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6366 src = build_simple_mem_ref_loc (loc, sarg);
6367 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6368 dst = build_simple_mem_ref_loc (loc, arg);
6369 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6370 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6371 append_to_statement_list (t, &list);
6372 break;
6373 case OMP_CLAUSE_FIRSTPRIVATE:
6374 decl = OMP_CLAUSE_DECL (c);
6375 if (is_variable_sized (decl))
6376 break;
6377 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6378 if (n == NULL)
6379 break;
6380 f = (tree) n->value;
6381 if (tcctx.cb.decl_map)
6382 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6383 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6384 if (n != NULL)
6385 {
6386 sf = (tree) n->value;
6387 if (tcctx.cb.decl_map)
6388 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6389 src = build_simple_mem_ref_loc (loc, sarg);
6390 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6391 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6392 src = build_simple_mem_ref_loc (loc, src);
6393 }
6394 else
6395 src = decl;
6396 dst = build_simple_mem_ref_loc (loc, arg);
6397 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6398 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6399 append_to_statement_list (t, &list);
6400 break;
6401 case OMP_CLAUSE_PRIVATE:
6402 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6403 break;
6404 decl = OMP_CLAUSE_DECL (c);
6405 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6406 f = (tree) n->value;
6407 if (tcctx.cb.decl_map)
6408 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6409 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6410 if (n != NULL)
6411 {
6412 sf = (tree) n->value;
6413 if (tcctx.cb.decl_map)
6414 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6415 src = build_simple_mem_ref_loc (loc, sarg);
6416 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6417 if (use_pointer_for_field (decl, NULL))
6418 src = build_simple_mem_ref_loc (loc, src);
6419 }
6420 else
6421 src = decl;
6422 dst = build_simple_mem_ref_loc (loc, arg);
6423 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6424 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6425 append_to_statement_list (t, &list);
6426 break;
6427 default:
6428 break;
6429 }
6430
6431 /* Last pass: handle VLA firstprivates. */
6432 if (tcctx.cb.decl_map)
6433 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6434 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6435 {
6436 tree ind, ptr, df;
6437
6438 decl = OMP_CLAUSE_DECL (c);
6439 if (!is_variable_sized (decl))
6440 continue;
6441 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6442 if (n == NULL)
6443 continue;
6444 f = (tree) n->value;
6445 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6446 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6447 ind = DECL_VALUE_EXPR (decl);
6448 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6449 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6450 n = splay_tree_lookup (ctx->sfield_map,
6451 (splay_tree_key) TREE_OPERAND (ind, 0));
6452 sf = (tree) n->value;
6453 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6454 src = build_simple_mem_ref_loc (loc, sarg);
6455 src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6456 src = build_simple_mem_ref_loc (loc, src);
6457 dst = build_simple_mem_ref_loc (loc, arg);
6458 dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6459 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6460 append_to_statement_list (t, &list);
6461 n = splay_tree_lookup (ctx->field_map,
6462 (splay_tree_key) TREE_OPERAND (ind, 0));
6463 df = (tree) n->value;
6464 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6465 ptr = build_simple_mem_ref_loc (loc, arg);
6466 ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6467 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6468 build_fold_addr_expr_loc (loc, dst));
6469 append_to_statement_list (t, &list);
6470 }
6471
6472 t = build1 (RETURN_EXPR, void_type_node, NULL);
6473 append_to_statement_list (t, &list);
6474
6475 if (tcctx.cb.decl_map)
6476 pointer_map_destroy (tcctx.cb.decl_map);
6477 pop_gimplify_context (NULL);
6478 BIND_EXPR_BODY (bind) = list;
6479 pop_cfun ();
6480 current_function_decl = ctx->cb.src_fn;
6481 }
6482
6483 /* Lower the OpenMP parallel or task directive in the current statement
6484 in GSI_P. CTX holds context information for the directive. */
6485
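/* A rough sketch of the result (.omp_data_o matches the sender temporary
   created below; the receiver name and fields are schematic):

       #pragma omp parallel shared (a) firstprivate (b)
         use (a, b);

   is lowered approximately to

       <send clauses:  .omp_data_o.a = &a;  .omp_data_o.b = b;>
       GIMPLE_OMP_PARALLEL <body:
         <receiver> = &.omp_data_o;   <-- rewritten when pass_expand_omp
         <input clauses>                  outlines the body
         use (...);
         GIMPLE_OMP_RETURN
       >
  */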
6486 static void
6487 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6488 {
6489 tree clauses;
6490 tree child_fn, t;
6491 gimple stmt = gsi_stmt (*gsi_p);
6492 gimple par_bind, bind;
6493 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6494 struct gimplify_ctx gctx;
6495 location_t loc = gimple_location (stmt);
6496
6497 clauses = gimple_omp_taskreg_clauses (stmt);
6498 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6499 par_body = gimple_bind_body (par_bind);
6500 child_fn = ctx->cb.dst_fn;
6501 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6502 && !gimple_omp_parallel_combined_p (stmt))
6503 {
6504 struct walk_stmt_info wi;
6505 int ws_num = 0;
6506
6507 memset (&wi, 0, sizeof (wi));
6508 wi.info = &ws_num;
6509 wi.val_only = true;
6510 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6511 if (ws_num == 1)
6512 gimple_omp_parallel_set_combined_p (stmt, true);
6513 }
6514 if (ctx->srecord_type)
6515 create_task_copyfn (stmt, ctx);
6516
6517 push_gimplify_context (&gctx);
6518
6519 par_olist = NULL;
6520 par_ilist = NULL;
6521 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6522 lower_omp (par_body, ctx);
6523 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6524 lower_reduction_clauses (clauses, &par_olist, ctx);
6525
6526 /* Declare all the variables created by mapping and the variables
6527 declared in the scope of the parallel body. */
6528 record_vars_into (ctx->block_vars, child_fn);
6529 record_vars_into (gimple_bind_vars (par_bind), child_fn);
6530
6531 if (ctx->record_type)
6532 {
6533 ctx->sender_decl
6534 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6535 : ctx->record_type, ".omp_data_o");
6536 DECL_NAMELESS (ctx->sender_decl) = 1;
6537 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6538 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6539 }
6540
6541 olist = NULL;
6542 ilist = NULL;
6543 lower_send_clauses (clauses, &ilist, &olist, ctx);
6544 lower_send_shared_vars (&ilist, &olist, ctx);
6545
6546 /* Once all the expansions are done, sequence all the different
6547 fragments inside gimple_omp_body. */
6548
6549 new_body = NULL;
6550
6551 if (ctx->record_type)
6552 {
6553 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6554 /* fixup_child_record_type might have changed receiver_decl's type. */
6555 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
6556 gimple_seq_add_stmt (&new_body,
6557 gimple_build_assign (ctx->receiver_decl, t));
6558 }
6559
6560 gimple_seq_add_seq (&new_body, par_ilist);
6561 gimple_seq_add_seq (&new_body, par_body);
6562 gimple_seq_add_seq (&new_body, par_olist);
6563 new_body = maybe_catch_exception (new_body);
6564 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6565 gimple_omp_set_body (stmt, new_body);
6566
6567 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6568 gimple_bind_add_stmt (bind, stmt);
6569 if (ilist || olist)
6570 {
6571 gimple_seq_add_stmt (&ilist, bind);
6572 gimple_seq_add_seq (&ilist, olist);
6573 bind = gimple_build_bind (NULL, ilist, NULL);
6574 }
6575
6576 gsi_replace (gsi_p, bind, true);
6577
6578 pop_gimplify_context (NULL);
6579 }
6580
6581 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
6582 regimplified. If DATA is non-NULL, lower_omp_1 is being called outside
6583 of an OpenMP context, but with task_shared_vars set. */
6584
6585 static tree
6586 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6587 void *data)
6588 {
6589 tree t = *tp;
6590
6591 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
6592 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6593 return t;
6594
6595 if (task_shared_vars
6596 && DECL_P (t)
6597 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6598 return t;
6599
6600 /* If a global variable has been privatized, TREE_CONSTANT on
6601 ADDR_EXPR might be wrong. */
6602 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6603 recompute_tree_invariant_for_addr_expr (t);
6604
6605 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6606 return NULL_TREE;
6607 }
6608
6609 static void
6610 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6611 {
6612 gimple stmt = gsi_stmt (*gsi_p);
6613 struct walk_stmt_info wi;
6614
6615 if (gimple_has_location (stmt))
6616 input_location = gimple_location (stmt);
6617
6618 if (task_shared_vars)
6619 memset (&wi, '\0', sizeof (wi));
6620
6621 /* If we have issued syntax errors, avoid doing any heavy lifting.
6622 Just replace the OpenMP directives with a NOP to avoid
6623 confusing RTL expansion. */
6624 if (seen_error () && is_gimple_omp (stmt))
6625 {
6626 gsi_replace (gsi_p, gimple_build_nop (), true);
6627 return;
6628 }
6629
6630 switch (gimple_code (stmt))
6631 {
6632 case GIMPLE_COND:
6633 if ((ctx || task_shared_vars)
6634 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6635 ctx ? NULL : &wi, NULL)
6636 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6637 ctx ? NULL : &wi, NULL)))
6638 gimple_regimplify_operands (stmt, gsi_p);
6639 break;
6640 case GIMPLE_CATCH:
6641 lower_omp (gimple_catch_handler (stmt), ctx);
6642 break;
6643 case GIMPLE_EH_FILTER:
6644 lower_omp (gimple_eh_filter_failure (stmt), ctx);
6645 break;
6646 case GIMPLE_TRY:
6647 lower_omp (gimple_try_eval (stmt), ctx);
6648 lower_omp (gimple_try_cleanup (stmt), ctx);
6649 break;
6650 case GIMPLE_BIND:
6651 lower_omp (gimple_bind_body (stmt), ctx);
6652 break;
6653 case GIMPLE_OMP_PARALLEL:
6654 case GIMPLE_OMP_TASK:
6655 ctx = maybe_lookup_ctx (stmt);
6656 lower_omp_taskreg (gsi_p, ctx);
6657 break;
6658 case GIMPLE_OMP_FOR:
6659 ctx = maybe_lookup_ctx (stmt);
6660 gcc_assert (ctx);
6661 lower_omp_for (gsi_p, ctx);
6662 break;
6663 case GIMPLE_OMP_SECTIONS:
6664 ctx = maybe_lookup_ctx (stmt);
6665 gcc_assert (ctx);
6666 lower_omp_sections (gsi_p, ctx);
6667 break;
6668 case GIMPLE_OMP_SINGLE:
6669 ctx = maybe_lookup_ctx (stmt);
6670 gcc_assert (ctx);
6671 lower_omp_single (gsi_p, ctx);
6672 break;
6673 case GIMPLE_OMP_MASTER:
6674 ctx = maybe_lookup_ctx (stmt);
6675 gcc_assert (ctx);
6676 lower_omp_master (gsi_p, ctx);
6677 break;
6678 case GIMPLE_OMP_ORDERED:
6679 ctx = maybe_lookup_ctx (stmt);
6680 gcc_assert (ctx);
6681 lower_omp_ordered (gsi_p, ctx);
6682 break;
6683 case GIMPLE_OMP_CRITICAL:
6684 ctx = maybe_lookup_ctx (stmt);
6685 gcc_assert (ctx);
6686 lower_omp_critical (gsi_p, ctx);
6687 break;
6688 case GIMPLE_OMP_ATOMIC_LOAD:
6689 if ((ctx || task_shared_vars)
6690 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6691 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6692 gimple_regimplify_operands (stmt, gsi_p);
6693 break;
6694 default:
6695 if ((ctx || task_shared_vars)
6696 && walk_gimple_op (stmt, lower_omp_regimplify_p,
6697 ctx ? NULL : &wi))
6698 gimple_regimplify_operands (stmt, gsi_p);
6699 break;
6700 }
6701 }
6702
6703 static void
6704 lower_omp (gimple_seq body, omp_context *ctx)
6705 {
6706 location_t saved_location = input_location;
6707 gimple_stmt_iterator gsi;
6708 for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6709 lower_omp_1 (&gsi, ctx);
6710 input_location = saved_location;
6711 }
6712 \f
6713 /* Main entry point. */
6714
6715 static unsigned int
6716 execute_lower_omp (void)
6717 {
6718 gimple_seq body;
6719
6720 /* This pass always runs, to provide PROP_gimple_lomp.
6721 But there is nothing to do unless -fopenmp is given. */
6722 if (flag_openmp == 0)
6723 return 0;
6724
6725 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6726 delete_omp_context);
6727
6728 body = gimple_body (current_function_decl);
6729 scan_omp (body, NULL);
6730 gcc_assert (taskreg_nesting_level == 0);
6731
6732 if (all_contexts->root)
6733 {
6734 struct gimplify_ctx gctx;
6735
6736 if (task_shared_vars)
6737 push_gimplify_context (&gctx);
6738 lower_omp (body, NULL);
6739 if (task_shared_vars)
6740 pop_gimplify_context (NULL);
6741 }
6742
6743 if (all_contexts)
6744 {
6745 splay_tree_delete (all_contexts);
6746 all_contexts = NULL;
6747 }
6748 BITMAP_FREE (task_shared_vars);
6749 return 0;
6750 }
6751
6752 struct gimple_opt_pass pass_lower_omp =
6753 {
6754 {
6755 GIMPLE_PASS,
6756 "omplower", /* name */
6757 NULL, /* gate */
6758 execute_lower_omp, /* execute */
6759 NULL, /* sub */
6760 NULL, /* next */
6761 0, /* static_pass_number */
6762 TV_NONE, /* tv_id */
6763 PROP_gimple_any, /* properties_required */
6764 PROP_gimple_lomp, /* properties_provided */
6765 0, /* properties_destroyed */
6766 0, /* todo_flags_start */
6767 0 /* todo_flags_finish */
6768 }
6769 };
6770 \f
6771 /* The following is a utility to diagnose OpenMP structured block violations.
6772 It is not part of the "omplower" pass, as that's invoked too late. It
6773 should be invoked by the respective front ends after gimplification. */
6774
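/* For example, the front ends use this to reject code such as

       #pragma omp parallel
       {
         goto bad;                <-- invalid branch out of the block
       }
     bad:;

   which is reported as an invalid branch to/from an OpenMP structured
   block.  */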
6775 static splay_tree all_labels;
6776
6777 /* Check for mismatched contexts and generate an error if needed. Return
6778 true if an error is detected. */
6779
6780 static bool
6781 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6782 gimple branch_ctx, gimple label_ctx)
6783 {
6784 if (label_ctx == branch_ctx)
6785 return false;
6786
6787
6788 /*
6789 Previously we kept track of the label's entire context in diagnose_sb_[12]
6790 so we could traverse it and issue a correct "exit" or "enter" error
6791 message upon a structured block violation.
6792
6793 We built the context by building a list with tree_cons'ing, but there is
6794 no easy counterpart in gimple tuples. It seems like far too much work
6795 for issuing exit/enter error messages. If someone really misses the
6796 distinct error message... patches welcome.
6797 */
6798
6799 #if 0
6800 /* Try to avoid confusing the user by producing an error message
6801 with correct "exit" or "enter" verbiage. We prefer "exit"
6802 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
6803 if (branch_ctx == NULL)
6804 exit_p = false;
6805 else
6806 {
6807 while (label_ctx)
6808 {
6809 if (TREE_VALUE (label_ctx) == branch_ctx)
6810 {
6811 exit_p = false;
6812 break;
6813 }
6814 label_ctx = TREE_CHAIN (label_ctx);
6815 }
6816 }
6817
6818 if (exit_p)
6819 error ("invalid exit from OpenMP structured block");
6820 else
6821 error ("invalid entry to OpenMP structured block");
6822 #endif
6823
6824 /* If it's obvious we have an invalid entry, be specific about the error. */
6825 if (branch_ctx == NULL)
6826 error ("invalid entry to OpenMP structured block");
6827 else
6828 /* Otherwise, be vague and lazy, but efficient. */
6829 error ("invalid branch to/from an OpenMP structured block");
6830
6831 gsi_replace (gsi_p, gimple_build_nop (), false);
6832 return true;
6833 }
6834
6835 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
6836 where each label is found. */
6837
6838 static tree
6839 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6840 struct walk_stmt_info *wi)
6841 {
6842 gimple context = (gimple) wi->info;
6843 gimple inner_context;
6844 gimple stmt = gsi_stmt (*gsi_p);
6845
6846 *handled_ops_p = true;
6847
6848 switch (gimple_code (stmt))
6849 {
6850 WALK_SUBSTMTS;
6851
6852 case GIMPLE_OMP_PARALLEL:
6853 case GIMPLE_OMP_TASK:
6854 case GIMPLE_OMP_SECTIONS:
6855 case GIMPLE_OMP_SINGLE:
6856 case GIMPLE_OMP_SECTION:
6857 case GIMPLE_OMP_MASTER:
6858 case GIMPLE_OMP_ORDERED:
6859 case GIMPLE_OMP_CRITICAL:
6860 /* The minimal context here is just the current OMP construct. */
6861 inner_context = stmt;
6862 wi->info = inner_context;
6863 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6864 wi->info = context;
6865 break;
6866
6867 case GIMPLE_OMP_FOR:
6868 inner_context = stmt;
6869 wi->info = inner_context;
6870 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6871 walk them. */
6872 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6873 diagnose_sb_1, NULL, wi);
6874 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
6875 wi->info = context;
6876 break;
6877
6878 case GIMPLE_LABEL:
6879 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
6880 (splay_tree_value) context);
6881 break;
6882
6883 default:
6884 break;
6885 }
6886
6887 return NULL_TREE;
6888 }
6889
6890 /* Pass 2: Check each branch and see if its context differs from that of
6891 the destination label's context. */
6892
6893 static tree
6894 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
6895 struct walk_stmt_info *wi)
6896 {
6897 gimple context = (gimple) wi->info;
6898 splay_tree_node n;
6899 gimple stmt = gsi_stmt (*gsi_p);
6900
6901 *handled_ops_p = true;
6902
6903 switch (gimple_code (stmt))
6904 {
6905 WALK_SUBSTMTS;
6906
6907 case GIMPLE_OMP_PARALLEL:
6908 case GIMPLE_OMP_TASK:
6909 case GIMPLE_OMP_SECTIONS:
6910 case GIMPLE_OMP_SINGLE:
6911 case GIMPLE_OMP_SECTION:
6912 case GIMPLE_OMP_MASTER:
6913 case GIMPLE_OMP_ORDERED:
6914 case GIMPLE_OMP_CRITICAL:
6915 wi->info = stmt;
6916 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6917 wi->info = context;
6918 break;
6919
6920 case GIMPLE_OMP_FOR:
6921 wi->info = stmt;
6922 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
6923 walk them. */
6924 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
6925 diagnose_sb_2, NULL, wi);
6926 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
6927 wi->info = context;
6928 break;
6929
6930 case GIMPLE_COND:
6931 {
6932 tree lab = gimple_cond_true_label (stmt);
6933 if (lab)
6934 {
6935 n = splay_tree_lookup (all_labels,
6936 (splay_tree_key) lab);
6937 diagnose_sb_0 (gsi_p, context,
6938 n ? (gimple) n->value : NULL);
6939 }
6940 lab = gimple_cond_false_label (stmt);
6941 if (lab)
6942 {
6943 n = splay_tree_lookup (all_labels,
6944 (splay_tree_key) lab);
6945 diagnose_sb_0 (gsi_p, context,
6946 n ? (gimple) n->value : NULL);
6947 }
6948 }
6949 break;
6950
6951 case GIMPLE_GOTO:
6952 {
6953 tree lab = gimple_goto_dest (stmt);
6954 if (TREE_CODE (lab) != LABEL_DECL)
6955 break;
6956
6957 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6958 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
6959 }
6960 break;
6961
6962 case GIMPLE_SWITCH:
6963 {
6964 unsigned int i;
6965 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
6966 {
6967 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
6968 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
6969 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
6970 break;
6971 }
6972 }
6973 break;
6974
6975 case GIMPLE_RETURN:
6976 diagnose_sb_0 (gsi_p, context, NULL);
6977 break;
6978
6979 default:
6980 break;
6981 }
6982
6983 return NULL_TREE;
6984 }
6985
6986 static unsigned int
6987 diagnose_omp_structured_block_errors (void)
6988 {
6989 struct walk_stmt_info wi;
6990 gimple_seq body = gimple_body (current_function_decl);
6991
6992 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
6993
6994 memset (&wi, 0, sizeof (wi));
6995 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
6996
6997 memset (&wi, 0, sizeof (wi));
6998 wi.want_locations = true;
6999 walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
7000
7001 splay_tree_delete (all_labels);
7002 all_labels = NULL;
7003
7004 return 0;
7005 }
7006
7007 static bool
7008 gate_diagnose_omp_blocks (void)
7009 {
7010 return flag_openmp != 0;
7011 }
7012
7013 struct gimple_opt_pass pass_diagnose_omp_blocks =
7014 {
7015 {
7016 GIMPLE_PASS,
7017 "*diagnose_omp_blocks", /* name */
7018 gate_diagnose_omp_blocks, /* gate */
7019 diagnose_omp_structured_block_errors, /* execute */
7020 NULL, /* sub */
7021 NULL, /* next */
7022 0, /* static_pass_number */
7023 TV_NONE, /* tv_id */
7024 PROP_gimple_any, /* properties_required */
7025 0, /* properties_provided */
7026 0, /* properties_destroyed */
7027 0, /* todo_flags_start */
7028 0, /* todo_flags_finish */
7029 }
7030 };
7031
7032 #include "gt-omp-low.h"