gcc/omp-low.c
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005-2013 Free Software Foundation, Inc.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "gimple.h"
31 #include "gimplify.h"
32 #include "gimple-iterator.h"
33 #include "gimplify-me.h"
34 #include "gimple-walk.h"
35 #include "tree-iterator.h"
36 #include "tree-inline.h"
37 #include "langhooks.h"
38 #include "diagnostic-core.h"
39 #include "gimple-ssa.h"
40 #include "cgraph.h"
41 #include "tree-cfg.h"
42 #include "tree-phinodes.h"
43 #include "ssa-iterators.h"
44 #include "tree-ssanames.h"
45 #include "tree-into-ssa.h"
46 #include "tree-dfa.h"
47 #include "tree-ssa.h"
48 #include "flags.h"
49 #include "function.h"
50 #include "expr.h"
51 #include "tree-pass.h"
52 #include "ggc.h"
53 #include "except.h"
54 #include "splay-tree.h"
55 #include "optabs.h"
56 #include "cfgloop.h"
57 #include "target.h"
58 #include "omp-low.h"
59 #include "gimple-low.h"
60 #include "tree-cfgcleanup.h"
61
62
63 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
64 phases. The first phase scans the function looking for OMP statements
65 and then for variables that must be replaced to satisfy data sharing
66 clauses. The second phase expands code for the constructs, as well as
67 re-gimplifying things when variables have been replaced with complex
68 expressions.
69
70 Final code generation is done by pass_expand_omp. The flowgraph is
71 scanned for parallel regions which are then moved to a new
72 function, to be invoked by the thread library. */
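/* Illustrative sketch (not part of the original sources): a directive such as

     #pragma omp parallel shared (x)
       x++;

   is, roughly, outlined into a child function that receives the shared
   data through a '.omp_data_s' record and is started through the libgomp
   runtime, e.g.

     foo._omp_fn.0 (struct .omp_data_s *.omp_data_i)
     {
       .omp_data_i->x = .omp_data_i->x + 1;
     }

     .omp_data_o.x = x;
     GOMP_parallel (foo._omp_fn.0, &.omp_data_o, 0, 0);
     x = .omp_data_o.x;

   The exact runtime entry point and argument layout are chosen later,
   in pass_expand_omp.  */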
73
74 /* Parallel region information. Every parallel and workshare
75 directive is enclosed between two markers, the OMP_* directive
76 and a corresponding OMP_RETURN statement. */
77
78 struct omp_region
79 {
80 /* The enclosing region. */
81 struct omp_region *outer;
82
83 /* First child region. */
84 struct omp_region *inner;
85
86 /* Next peer region. */
87 struct omp_region *next;
88
89 /* Block containing the omp directive as its last stmt. */
90 basic_block entry;
91
92 /* Block containing the OMP_RETURN as its last stmt. */
93 basic_block exit;
94
95 /* Block containing the OMP_CONTINUE as its last stmt. */
96 basic_block cont;
97
98 /* If this is a combined parallel+workshare region, this is a list
99 of additional arguments needed by the combined parallel+workshare
100 library call. */
101 vec<tree, va_gc> *ws_args;
102
103 /* The code for the omp directive of this region. */
104 enum gimple_code type;
105
106 /* Schedule kind, only used for OMP_FOR type regions. */
107 enum omp_clause_schedule_kind sched_kind;
108
109 /* True if this is a combined parallel+workshare region. */
110 bool is_combined_parallel;
111 };
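/* Illustrative example (not from the original sources): for

     #pragma omp parallel
     {
       #pragma omp for
       for (...) ...;
       #pragma omp single
       ...;
     }

   there is one GIMPLE_OMP_PARALLEL region; the regions for the 'for' and
   the 'single' are its children, reachable through INNER and chained to
   each other through NEXT, and both point back to the parallel region
   through OUTER.  Each region spans the blocks from its ENTRY (whose last
   statement is the directive) to its EXIT (whose last statement is the
   matching GIMPLE_OMP_RETURN).  */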
112
113 /* Context structure. Used to store information about each parallel
114 directive in the code. */
115
116 typedef struct omp_context
117 {
118 /* This field must be at the beginning, as we do "inheritance": Some
119 callback functions for tree-inline.c (e.g., omp_copy_decl)
120 receive a copy_body_data pointer that is up-casted to an
121 omp_context pointer. */
122 copy_body_data cb;
123
124 /* The tree of contexts corresponding to the encountered constructs. */
125 struct omp_context *outer;
126 gimple stmt;
127
128 /* Map variables to fields in a structure that allows communication
129 between sending and receiving threads. */
130 splay_tree field_map;
131 tree record_type;
132 tree sender_decl;
133 tree receiver_decl;
134
135 /* These are used just by task contexts, if task firstprivate fn is
136 needed. srecord_type is used to communicate from the thread
137 that encountered the task construct to the task firstprivate fn;
138 record_type is allocated by GOMP_task, initialized by the task firstprivate
139 fn and passed to the task body fn. */
140 splay_tree sfield_map;
141 tree srecord_type;
142
143 /* A chain of variables to add to the top-level block surrounding the
144 construct. In the case of a parallel, this is in the child function. */
145 tree block_vars;
146
147 /* Label to which GOMP_cancel{,lation_point} and explicit and implicit
148 barriers should jump during the omplower pass. */
149 tree cancel_label;
150
151 /* What to do with variables with implicitly determined sharing
152 attributes. */
153 enum omp_clause_default_kind default_kind;
154
155 /* Nesting depth of this context. Used to beautify error messages re
156 invalid gotos. The outermost ctx is depth 1, with depth 0 being
157 reserved for the main body of the function. */
158 int depth;
159
160 /* True if this parallel directive is nested within another. */
161 bool is_nested;
162
163 /* True if this construct can be cancelled. */
164 bool cancellable;
165 } omp_context;
166
167
168 struct omp_for_data_loop
169 {
170 tree v, n1, n2, step;
171 enum tree_code cond_code;
172 };
173
174 /* A structure describing the main elements of a parallel loop. */
175
176 struct omp_for_data
177 {
178 struct omp_for_data_loop loop;
179 tree chunk_size;
180 gimple for_stmt;
181 tree pre, iter_type;
182 int collapse;
183 bool have_nowait, have_ordered;
184 enum omp_clause_schedule_kind sched_kind;
185 struct omp_for_data_loop *loops;
186 };
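/* Illustrative example (not from the original sources): for

     #pragma omp for schedule (dynamic, 4)
     for (i = 0; i < n; i += 2)

   extract_omp_for_data below fills in, roughly,

     loop.v = i, loop.n1 = 0, loop.n2 = n, loop.step = 2,
     loop.cond_code = LT_EXPR, collapse = 1,
     sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC, chunk_size = 4,
     have_nowait = false, have_ordered = false.

   With collapse (N), LOOPS points to an array of N such loop descriptions
   and LOOP describes the single collapsed iteration space.  */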
187
188
189 static splay_tree all_contexts;
190 static int taskreg_nesting_level;
191 static int target_nesting_level;
192 static struct omp_region *root_omp_region;
193 static bitmap task_shared_vars;
194
195 static void scan_omp (gimple_seq *, omp_context *);
196 static tree scan_omp_1_op (tree *, int *, void *);
197
198 #define WALK_SUBSTMTS \
199 case GIMPLE_BIND: \
200 case GIMPLE_TRY: \
201 case GIMPLE_CATCH: \
202 case GIMPLE_EH_FILTER: \
203 case GIMPLE_TRANSACTION: \
204 /* The sub-statements for these should be walked. */ \
205 *handled_ops_p = false; \
206 break;
207
208 /* Convenience function for calling scan_omp_1_op on tree operands. */
209
210 static inline tree
211 scan_omp_op (tree *tp, omp_context *ctx)
212 {
213 struct walk_stmt_info wi;
214
215 memset (&wi, 0, sizeof (wi));
216 wi.info = ctx;
217 wi.want_locations = true;
218
219 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
220 }
221
222 static void lower_omp (gimple_seq *, omp_context *);
223 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
224 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
225
226 /* Find an OpenMP clause of type KIND within CLAUSES. */
227
228 tree
229 find_omp_clause (tree clauses, enum omp_clause_code kind)
230 {
231 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
232 if (OMP_CLAUSE_CODE (clauses) == kind)
233 return clauses;
234
235 return NULL_TREE;
236 }
237
238 /* Return true if CTX is for an omp parallel. */
239
240 static inline bool
241 is_parallel_ctx (omp_context *ctx)
242 {
243 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
244 }
245
246
247 /* Return true if CTX is for an omp task. */
248
249 static inline bool
250 is_task_ctx (omp_context *ctx)
251 {
252 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
253 }
254
255
256 /* Return true if CTX is for an omp parallel or omp task. */
257
258 static inline bool
259 is_taskreg_ctx (omp_context *ctx)
260 {
261 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
262 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
263 }
264
265
266 /* Return true if REGION is a combined parallel+workshare region. */
267
268 static inline bool
269 is_combined_parallel (struct omp_region *region)
270 {
271 return region->is_combined_parallel;
272 }
273
274
275 /* Extract the header elements of parallel loop FOR_STMT and store
276 them into *FD. */
277
278 static void
279 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
280 struct omp_for_data_loop *loops)
281 {
282 tree t, var, *collapse_iter, *collapse_count;
283 tree count = NULL_TREE, iter_type = long_integer_type_node;
284 struct omp_for_data_loop *loop;
285 int i;
286 struct omp_for_data_loop dummy_loop;
287 location_t loc = gimple_location (for_stmt);
288 bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_KIND_SIMD;
289 bool distribute = gimple_omp_for_kind (for_stmt)
290 == GF_OMP_FOR_KIND_DISTRIBUTE;
291
292 fd->for_stmt = for_stmt;
293 fd->pre = NULL;
294 fd->collapse = gimple_omp_for_collapse (for_stmt);
295 if (fd->collapse > 1)
296 fd->loops = loops;
297 else
298 fd->loops = &fd->loop;
299
300 fd->have_nowait = distribute || simd;
301 fd->have_ordered = false;
302 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
303 fd->chunk_size = NULL_TREE;
304 collapse_iter = NULL;
305 collapse_count = NULL;
306
307 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
308 switch (OMP_CLAUSE_CODE (t))
309 {
310 case OMP_CLAUSE_NOWAIT:
311 fd->have_nowait = true;
312 break;
313 case OMP_CLAUSE_ORDERED:
314 fd->have_ordered = true;
315 break;
316 case OMP_CLAUSE_SCHEDULE:
317 gcc_assert (!distribute);
318 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
319 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
320 break;
321 case OMP_CLAUSE_DIST_SCHEDULE:
322 gcc_assert (distribute);
323 fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
324 break;
325 case OMP_CLAUSE_COLLAPSE:
326 if (fd->collapse > 1)
327 {
328 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
329 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
330 }
331 default:
332 break;
333 }
334
335 /* FIXME: for now map schedule(auto) to schedule(static).
336 There should be analysis to determine whether all iterations
337 are approximately the same amount of work (then schedule(static)
338 is best) or if it varies (then schedule(dynamic,N) is better). */
339 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
340 {
341 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
342 gcc_assert (fd->chunk_size == NULL);
343 }
344 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
345 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
346 gcc_assert (fd->chunk_size == NULL);
347 else if (fd->chunk_size == NULL)
348 {
349 /* We only need to compute a default chunk size for ordered
350 static loops and dynamic loops. */
351 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
352 || fd->have_ordered)
353 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
354 ? integer_zero_node : integer_one_node;
355 }
356
357 for (i = 0; i < fd->collapse; i++)
358 {
359 if (fd->collapse == 1)
360 loop = &fd->loop;
361 else if (loops != NULL)
362 loop = loops + i;
363 else
364 loop = &dummy_loop;
365
366 loop->v = gimple_omp_for_index (for_stmt, i);
367 gcc_assert (SSA_VAR_P (loop->v));
368 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
369 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
370 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
371 loop->n1 = gimple_omp_for_initial (for_stmt, i);
372
373 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
374 loop->n2 = gimple_omp_for_final (for_stmt, i);
375 switch (loop->cond_code)
376 {
377 case LT_EXPR:
378 case GT_EXPR:
379 break;
380 case NE_EXPR:
381 gcc_assert (gimple_omp_for_kind (for_stmt)
382 == GF_OMP_FOR_KIND_CILKSIMD);
383 break;
384 case LE_EXPR:
385 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
386 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
387 else
388 loop->n2 = fold_build2_loc (loc,
389 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
390 build_int_cst (TREE_TYPE (loop->n2), 1));
391 loop->cond_code = LT_EXPR;
392 break;
393 case GE_EXPR:
394 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
395 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
396 else
397 loop->n2 = fold_build2_loc (loc,
398 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
399 build_int_cst (TREE_TYPE (loop->n2), 1));
400 loop->cond_code = GT_EXPR;
401 break;
402 default:
403 gcc_unreachable ();
404 }
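/* For example (illustration only): a loop written as
   "for (i = 0; i <= N; i++)" arrives here with cond_code LE_EXPR and is
   rewritten above into "i < N + 1", so the rest of the pass only has to
   handle LT_EXPR and GT_EXPR (plus NE_EXPR for Cilk Plus simd loops).  */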
405
406 t = gimple_omp_for_incr (for_stmt, i);
407 gcc_assert (TREE_OPERAND (t, 0) == var);
408 switch (TREE_CODE (t))
409 {
410 case PLUS_EXPR:
411 loop->step = TREE_OPERAND (t, 1);
412 break;
413 case POINTER_PLUS_EXPR:
414 loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
415 break;
416 case MINUS_EXPR:
417 loop->step = TREE_OPERAND (t, 1);
418 loop->step = fold_build1_loc (loc,
419 NEGATE_EXPR, TREE_TYPE (loop->step),
420 loop->step);
421 break;
422 default:
423 gcc_unreachable ();
424 }
425
426 if (simd
427 || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
428 && !fd->have_ordered))
429 {
430 if (fd->collapse == 1)
431 iter_type = TREE_TYPE (loop->v);
432 else if (i == 0
433 || TYPE_PRECISION (iter_type)
434 < TYPE_PRECISION (TREE_TYPE (loop->v)))
435 iter_type
436 = build_nonstandard_integer_type
437 (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
438 }
439 else if (iter_type != long_long_unsigned_type_node)
440 {
441 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
442 iter_type = long_long_unsigned_type_node;
443 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
444 && TYPE_PRECISION (TREE_TYPE (loop->v))
445 >= TYPE_PRECISION (iter_type))
446 {
447 tree n;
448
449 if (loop->cond_code == LT_EXPR)
450 n = fold_build2_loc (loc,
451 PLUS_EXPR, TREE_TYPE (loop->v),
452 loop->n2, loop->step);
453 else
454 n = loop->n1;
455 if (TREE_CODE (n) != INTEGER_CST
456 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
457 iter_type = long_long_unsigned_type_node;
458 }
459 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
460 > TYPE_PRECISION (iter_type))
461 {
462 tree n1, n2;
463
464 if (loop->cond_code == LT_EXPR)
465 {
466 n1 = loop->n1;
467 n2 = fold_build2_loc (loc,
468 PLUS_EXPR, TREE_TYPE (loop->v),
469 loop->n2, loop->step);
470 }
471 else
472 {
473 n1 = fold_build2_loc (loc,
474 MINUS_EXPR, TREE_TYPE (loop->v),
475 loop->n2, loop->step);
476 n2 = loop->n1;
477 }
478 if (TREE_CODE (n1) != INTEGER_CST
479 || TREE_CODE (n2) != INTEGER_CST
480 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
481 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
482 iter_type = long_long_unsigned_type_node;
483 }
484 }
485
486 if (collapse_count && *collapse_count == NULL)
487 {
488 t = fold_binary (loop->cond_code, boolean_type_node,
489 fold_convert (TREE_TYPE (loop->v), loop->n1),
490 fold_convert (TREE_TYPE (loop->v), loop->n2));
491 if (t && integer_zerop (t))
492 count = build_zero_cst (long_long_unsigned_type_node);
493 else if ((i == 0 || count != NULL_TREE)
494 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
495 && TREE_CONSTANT (loop->n1)
496 && TREE_CONSTANT (loop->n2)
497 && TREE_CODE (loop->step) == INTEGER_CST)
498 {
499 tree itype = TREE_TYPE (loop->v);
500
501 if (POINTER_TYPE_P (itype))
502 itype = signed_type_for (itype);
503 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
504 t = fold_build2_loc (loc,
505 PLUS_EXPR, itype,
506 fold_convert_loc (loc, itype, loop->step), t);
507 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
508 fold_convert_loc (loc, itype, loop->n2));
509 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
510 fold_convert_loc (loc, itype, loop->n1));
511 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
512 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
513 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
514 fold_build1_loc (loc, NEGATE_EXPR, itype,
515 fold_convert_loc (loc, itype,
516 loop->step)));
517 else
518 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
519 fold_convert_loc (loc, itype, loop->step));
520 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
521 if (count != NULL_TREE)
522 count = fold_build2_loc (loc,
523 MULT_EXPR, long_long_unsigned_type_node,
524 count, t);
525 else
526 count = t;
527 if (TREE_CODE (count) != INTEGER_CST)
528 count = NULL_TREE;
529 }
530 else if (count && !integer_zerop (count))
531 count = NULL_TREE;
532 }
533 }
534
535 if (count
536 && !simd
537 && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
538 || fd->have_ordered))
539 {
540 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
541 iter_type = long_long_unsigned_type_node;
542 else
543 iter_type = long_integer_type_node;
544 }
545 else if (collapse_iter && *collapse_iter != NULL)
546 iter_type = TREE_TYPE (*collapse_iter);
547 fd->iter_type = iter_type;
548 if (collapse_iter && *collapse_iter == NULL)
549 *collapse_iter = create_tmp_var (iter_type, ".iter");
550 if (collapse_count && *collapse_count == NULL)
551 {
552 if (count)
553 *collapse_count = fold_convert_loc (loc, iter_type, count);
554 else
555 *collapse_count = create_tmp_var (iter_type, ".count");
556 }
557
558 if (fd->collapse > 1)
559 {
560 fd->loop.v = *collapse_iter;
561 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
562 fd->loop.n2 = *collapse_count;
563 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
564 fd->loop.cond_code = LT_EXPR;
565 }
566 }
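/* Worked example (illustration only): for

     #pragma omp for collapse (2)
     for (i = 0; i < 3; i++)
       for (j = 0; j < 5; j++)

   the loop above computes a constant trip count of 3 * 5 = 15, so the
   collapse clause's '.count' temporary becomes 15 and FD->loop describes
   a single logical loop ".iter = 0; .iter < 15; .iter += 1" with
   cond_code LT_EXPR, from which the original i and j are reconstructed
   later during expansion.  */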
567
568
569 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
570 is the immediate dominator of PAR_ENTRY_BB, return true if there
571 are no data dependencies that would prevent expanding the parallel
572 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
573
574 When expanding a combined parallel+workshare region, the call to
575 the child function may need additional arguments in the case of
576 GIMPLE_OMP_FOR regions. In some cases, these arguments are
577 computed out of variables passed in from the parent to the child
578 via 'struct .omp_data_s'. For instance:
579
580 #pragma omp parallel for schedule (guided, i * 4)
581 for (j ...)
582
583 Is lowered into:
584
585 # BLOCK 2 (PAR_ENTRY_BB)
586 .omp_data_o.i = i;
587 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
588
589 # BLOCK 3 (WS_ENTRY_BB)
590 .omp_data_i = &.omp_data_o;
591 D.1667 = .omp_data_i->i;
592 D.1598 = D.1667 * 4;
593 #pragma omp for schedule (guided, D.1598)
594
595 When we outline the parallel region, the call to the child function
596 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
597 that value is computed *after* the call site. So, in principle we
598 cannot do the transformation.
599
600 To see whether the code in WS_ENTRY_BB blocks the combined
601 parallel+workshare call, we collect all the variables used in the
602 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
603 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
604 call.
605
606 FIXME. If we had the SSA form built at this point, we could merely
607 hoist the code in block 3 into block 2 and be done with it. But at
608 this point we don't have dataflow information and though we could
609 hack something up here, it is really not worth the aggravation. */
610
611 static bool
612 workshare_safe_to_combine_p (basic_block ws_entry_bb)
613 {
614 struct omp_for_data fd;
615 gimple ws_stmt = last_stmt (ws_entry_bb);
616
617 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
618 return true;
619
620 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
621
622 extract_omp_for_data (ws_stmt, &fd, NULL);
623
624 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
625 return false;
626 if (fd.iter_type != long_integer_type_node)
627 return false;
628
629 /* FIXME. We give up too easily here. If any of these arguments
630 are not constants, they will likely involve variables that have
631 been mapped into fields of .omp_data_s for sharing with the child
632 function. With appropriate data flow, it would be possible to
633 see through this. */
634 if (!is_gimple_min_invariant (fd.loop.n1)
635 || !is_gimple_min_invariant (fd.loop.n2)
636 || !is_gimple_min_invariant (fd.loop.step)
637 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
638 return false;
639
640 return true;
641 }
642
643
644 /* Collect additional arguments needed to emit a combined
645 parallel+workshare call. WS_STMT is the workshare directive being
646 expanded. */
647
648 static vec<tree, va_gc> *
649 get_ws_args_for (gimple par_stmt, gimple ws_stmt)
650 {
651 tree t;
652 location_t loc = gimple_location (ws_stmt);
653 vec<tree, va_gc> *ws_args;
654
655 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
656 {
657 struct omp_for_data fd;
658 tree n1, n2;
659
660 extract_omp_for_data (ws_stmt, &fd, NULL);
661 n1 = fd.loop.n1;
662 n2 = fd.loop.n2;
663
664 if (gimple_omp_for_combined_into_p (ws_stmt))
665 {
666 tree innerc
667 = find_omp_clause (gimple_omp_parallel_clauses (par_stmt),
668 OMP_CLAUSE__LOOPTEMP_);
669 gcc_assert (innerc);
670 n1 = OMP_CLAUSE_DECL (innerc);
671 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
672 OMP_CLAUSE__LOOPTEMP_);
673 gcc_assert (innerc);
674 n2 = OMP_CLAUSE_DECL (innerc);
675 }
676
677 vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
678
679 t = fold_convert_loc (loc, long_integer_type_node, n1);
680 ws_args->quick_push (t);
681
682 t = fold_convert_loc (loc, long_integer_type_node, n2);
683 ws_args->quick_push (t);
684
685 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
686 ws_args->quick_push (t);
687
688 if (fd.chunk_size)
689 {
690 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
691 ws_args->quick_push (t);
692 }
693
694 return ws_args;
695 }
696 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
697 {
698 /* Number of sections is equal to the number of edges from the
699 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
700 the exit of the sections region. */
701 basic_block bb = single_succ (gimple_bb (ws_stmt));
702 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
703 vec_alloc (ws_args, 1);
704 ws_args->quick_push (t);
705 return ws_args;
706 }
707
708 gcc_unreachable ();
709 }
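/* Illustration (not from the original sources): for a combined
   "#pragma omp parallel for schedule (dynamic, 8)" over i = 0 .. n-1,
   the vector built above is roughly { 0L, (long) n, 1L, 8L }, i.e. the
   extra arguments the combined parallel+workshare libgomp call needs
   beyond the outlined function and its data pointer; for combined
   parallel sections it holds just the section count.  */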
710
711
712 /* Discover whether REGION is a combined parallel+workshare region. */
713
714 static void
715 determine_parallel_type (struct omp_region *region)
716 {
717 basic_block par_entry_bb, par_exit_bb;
718 basic_block ws_entry_bb, ws_exit_bb;
719
720 if (region == NULL || region->inner == NULL
721 || region->exit == NULL || region->inner->exit == NULL
722 || region->inner->cont == NULL)
723 return;
724
725 /* We only support parallel+for and parallel+sections. */
726 if (region->type != GIMPLE_OMP_PARALLEL
727 || (region->inner->type != GIMPLE_OMP_FOR
728 && region->inner->type != GIMPLE_OMP_SECTIONS))
729 return;
730
731 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
732 WS_EXIT_BB -> PAR_EXIT_BB. */
733 par_entry_bb = region->entry;
734 par_exit_bb = region->exit;
735 ws_entry_bb = region->inner->entry;
736 ws_exit_bb = region->inner->exit;
737
738 if (single_succ (par_entry_bb) == ws_entry_bb
739 && single_succ (ws_exit_bb) == par_exit_bb
740 && workshare_safe_to_combine_p (ws_entry_bb)
741 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
742 || (last_and_only_stmt (ws_entry_bb)
743 && last_and_only_stmt (par_exit_bb))))
744 {
745 gimple par_stmt = last_stmt (par_entry_bb);
746 gimple ws_stmt = last_stmt (ws_entry_bb);
747
748 if (region->inner->type == GIMPLE_OMP_FOR)
749 {
750 /* If this is a combined parallel loop, we need to determine
751 whether or not to use the combined library calls. There
752 are two cases where we do not apply the transformation:
753 static loops and any kind of ordered loop. In the first
754 case, we already open code the loop so there is no need
755 to do anything else. In the latter case, the combined
756 parallel loop call would still need extra synchronization
757 to implement ordered semantics, so there would not be any
758 gain in using the combined call. */
759 tree clauses = gimple_omp_for_clauses (ws_stmt);
760 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
761 if (c == NULL
762 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
763 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
764 {
765 region->is_combined_parallel = false;
766 region->inner->is_combined_parallel = false;
767 return;
768 }
769 }
770
771 region->is_combined_parallel = true;
772 region->inner->is_combined_parallel = true;
773 region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
774 }
775 }
776
777
778 /* Return true if EXPR is variable sized. */
779
780 static inline bool
781 is_variable_sized (const_tree expr)
782 {
783 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
784 }
785
786 /* Return true if DECL is a reference type. */
787
788 static inline bool
789 is_reference (tree decl)
790 {
791 return lang_hooks.decls.omp_privatize_by_reference (decl);
792 }
793
794 /* Look up variables in the decl or field splay trees. The "maybe" form
795 allows the variable not to have been entered; otherwise we assert
796 that the variable must have been entered. */
797
798 static inline tree
799 lookup_decl (tree var, omp_context *ctx)
800 {
801 tree *n;
802 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
803 return *n;
804 }
805
806 static inline tree
807 maybe_lookup_decl (const_tree var, omp_context *ctx)
808 {
809 tree *n;
810 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
811 return n ? *n : NULL_TREE;
812 }
813
814 static inline tree
815 lookup_field (tree var, omp_context *ctx)
816 {
817 splay_tree_node n;
818 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
819 return (tree) n->value;
820 }
821
822 static inline tree
823 lookup_sfield (tree var, omp_context *ctx)
824 {
825 splay_tree_node n;
826 n = splay_tree_lookup (ctx->sfield_map
827 ? ctx->sfield_map : ctx->field_map,
828 (splay_tree_key) var);
829 return (tree) n->value;
830 }
831
832 static inline tree
833 maybe_lookup_field (tree var, omp_context *ctx)
834 {
835 splay_tree_node n;
836 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
837 return n ? (tree) n->value : NULL_TREE;
838 }
839
840 /* Return true if DECL should be copied by pointer. SHARED_CTX is
841 the parallel context if DECL is to be shared. */
842
843 static bool
844 use_pointer_for_field (tree decl, omp_context *shared_ctx)
845 {
846 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
847 return true;
848
849 /* We can only use copy-in/copy-out semantics for shared variables
850 when we know the value is not accessible from an outer scope. */
851 if (shared_ctx)
852 {
853 /* ??? Trivially accessible from anywhere. But why would we even
854 be passing an address in this case? Should we simply assert
855 this to be false, or should we have a cleanup pass that removes
856 these from the list of mappings? */
857 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
858 return true;
859
860 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
861 without analyzing the expression whether or not its location
862 is accessible to anyone else. In the case of nested parallel
863 regions it certainly may be. */
864 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
865 return true;
866
867 /* Do not use copy-in/copy-out for variables that have their
868 address taken. */
869 if (TREE_ADDRESSABLE (decl))
870 return true;
871
872 /* lower_send_shared_vars only uses copy-in, but not copy-out
873 for these. */
874 if (TREE_READONLY (decl)
875 || ((TREE_CODE (decl) == RESULT_DECL
876 || TREE_CODE (decl) == PARM_DECL)
877 && DECL_BY_REFERENCE (decl)))
878 return false;
879
880 /* Disallow copy-in/out in nested parallel if
881 decl is shared in outer parallel, otherwise
882 each thread could store the shared variable
883 in its own copy-in location, making the
884 variable no longer really shared. */
885 if (shared_ctx->is_nested)
886 {
887 omp_context *up;
888
889 for (up = shared_ctx->outer; up; up = up->outer)
890 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
891 break;
892
893 if (up)
894 {
895 tree c;
896
897 for (c = gimple_omp_taskreg_clauses (up->stmt);
898 c; c = OMP_CLAUSE_CHAIN (c))
899 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
900 && OMP_CLAUSE_DECL (c) == decl)
901 break;
902
903 if (c)
904 goto maybe_mark_addressable_and_ret;
905 }
906 }
907
908 /* For tasks avoid using copy-in/out. As tasks can be
909 deferred or executed in a different thread, when GOMP_task
910 returns, the task hasn't necessarily terminated. */
911 if (is_task_ctx (shared_ctx))
912 {
913 tree outer;
914 maybe_mark_addressable_and_ret:
915 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
916 if (is_gimple_reg (outer))
917 {
918 /* Taking address of OUTER in lower_send_shared_vars
919 might need regimplification of everything that uses the
920 variable. */
921 if (!task_shared_vars)
922 task_shared_vars = BITMAP_ALLOC (NULL);
923 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
924 TREE_ADDRESSABLE (outer) = 1;
925 }
926 return true;
927 }
928 }
929
930 return false;
931 }
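/* Examples (illustration only): a local "int x" that is merely read and
   written inside the region is shared by copy-in/copy-out (result
   false); the same variable is passed by pointer (result true) as soon
   as it is an aggregate, has its address taken, is static or external,
   or is shared into a task, because then a private copy inside
   .omp_data_s could not stand in for the real object.  */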
932
933 /* Construct a new automatic decl similar to VAR. */
934
935 static tree
936 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
937 {
938 tree copy = copy_var_decl (var, name, type);
939
940 DECL_CONTEXT (copy) = current_function_decl;
941 DECL_CHAIN (copy) = ctx->block_vars;
942 ctx->block_vars = copy;
943
944 return copy;
945 }
946
947 static tree
948 omp_copy_decl_1 (tree var, omp_context *ctx)
949 {
950 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
951 }
952
953 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
954 as appropriate. */
955 static tree
956 omp_build_component_ref (tree obj, tree field)
957 {
958 tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
959 if (TREE_THIS_VOLATILE (field))
960 TREE_THIS_VOLATILE (ret) |= 1;
961 if (TREE_READONLY (field))
962 TREE_READONLY (ret) |= 1;
963 return ret;
964 }
965
966 /* Build tree nodes to access the field for VAR on the receiver side. */
967
968 static tree
969 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
970 {
971 tree x, field = lookup_field (var, ctx);
972
973 /* If the receiver record type was remapped in the child function,
974 remap the field into the new record type. */
975 x = maybe_lookup_field (field, ctx);
976 if (x != NULL)
977 field = x;
978
979 x = build_simple_mem_ref (ctx->receiver_decl);
980 x = omp_build_component_ref (x, field);
981 if (by_ref)
982 x = build_simple_mem_ref (x);
983
984 return x;
985 }
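/* Illustration (not from the original sources): for a variable x mapped
   into the communication record, the reference built here is
   ".omp_data_i->x", or "*.omp_data_i->x" when BY_REF indicates the field
   only holds a pointer to the original object.  */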
986
987 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
988 of a parallel, this is a component reference; for workshare constructs
989 this is some variable. */
990
991 static tree
992 build_outer_var_ref (tree var, omp_context *ctx)
993 {
994 tree x;
995
996 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
997 x = var;
998 else if (is_variable_sized (var))
999 {
1000 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
1001 x = build_outer_var_ref (x, ctx);
1002 x = build_simple_mem_ref (x);
1003 }
1004 else if (is_taskreg_ctx (ctx))
1005 {
1006 bool by_ref = use_pointer_for_field (var, NULL);
1007 x = build_receiver_ref (var, by_ref, ctx);
1008 }
1009 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
1010 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_KIND_SIMD)
1011 {
1012 /* #pragma omp simd isn't a worksharing construct, and can reference even
1013 private vars in its linear etc. clauses. */
1014 x = NULL_TREE;
1015 if (ctx->outer && is_taskreg_ctx (ctx))
1016 x = lookup_decl (var, ctx->outer);
1017 else if (ctx->outer)
1018 x = maybe_lookup_decl_in_outer_ctx (var, ctx);
1019 if (x == NULL_TREE)
1020 x = var;
1021 }
1022 else if (ctx->outer)
1023 x = lookup_decl (var, ctx->outer);
1024 else if (is_reference (var))
1025 /* This can happen with orphaned constructs. If var is reference, it is
1026 possible it is shared and as such valid. */
1027 x = var;
1028 else
1029 gcc_unreachable ();
1030
1031 if (is_reference (var))
1032 x = build_simple_mem_ref (x);
1033
1034 return x;
1035 }
1036
1037 /* Build tree nodes to access the field for VAR on the sender side. */
1038
1039 static tree
1040 build_sender_ref (tree var, omp_context *ctx)
1041 {
1042 tree field = lookup_sfield (var, ctx);
1043 return omp_build_component_ref (ctx->sender_decl, field);
1044 }
1045
1046 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
1047
1048 static void
1049 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
1050 {
1051 tree field, type, sfield = NULL_TREE;
1052
1053 gcc_assert ((mask & 1) == 0
1054 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
1055 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
1056 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
1057
1058 type = TREE_TYPE (var);
1059 if (mask & 4)
1060 {
1061 gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
1062 type = build_pointer_type (build_pointer_type (type));
1063 }
1064 else if (by_ref)
1065 type = build_pointer_type (type);
1066 else if ((mask & 3) == 1 && is_reference (var))
1067 type = TREE_TYPE (type);
1068
1069 field = build_decl (DECL_SOURCE_LOCATION (var),
1070 FIELD_DECL, DECL_NAME (var), type);
1071
1072 /* Remember what variable this field was created for. This does have a
1073 side effect of making dwarf2out ignore this member, so for helpful
1074 debugging we clear it later in delete_omp_context. */
1075 DECL_ABSTRACT_ORIGIN (field) = var;
1076 if (type == TREE_TYPE (var))
1077 {
1078 DECL_ALIGN (field) = DECL_ALIGN (var);
1079 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
1080 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
1081 }
1082 else
1083 DECL_ALIGN (field) = TYPE_ALIGN (type);
1084
1085 if ((mask & 3) == 3)
1086 {
1087 insert_field_into_struct (ctx->record_type, field);
1088 if (ctx->srecord_type)
1089 {
1090 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1091 FIELD_DECL, DECL_NAME (var), type);
1092 DECL_ABSTRACT_ORIGIN (sfield) = var;
1093 DECL_ALIGN (sfield) = DECL_ALIGN (field);
1094 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
1095 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
1096 insert_field_into_struct (ctx->srecord_type, sfield);
1097 }
1098 }
1099 else
1100 {
1101 if (ctx->srecord_type == NULL_TREE)
1102 {
1103 tree t;
1104
1105 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
1106 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1107 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1108 {
1109 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1110 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
1111 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
1112 insert_field_into_struct (ctx->srecord_type, sfield);
1113 splay_tree_insert (ctx->sfield_map,
1114 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
1115 (splay_tree_value) sfield);
1116 }
1117 }
1118 sfield = field;
1119 insert_field_into_struct ((mask & 1) ? ctx->record_type
1120 : ctx->srecord_type, field);
1121 }
1122
1123 if (mask & 1)
1124 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
1125 (splay_tree_value) field);
1126 if ((mask & 2) && ctx->sfield_map)
1127 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1128 (splay_tree_value) sfield);
1129 }
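/* Note on MASK (a summary of the code above, not from the original
   sources): bit 0 requests a field in CTX->record_type (recorded in
   field_map), bit 1 a field in CTX->srecord_type (sfield_map), so
   MASK == 3 installs the field in both; bit 2, used as MASK == 7 for
   mapped array sections, makes the field a pointer to a pointer to the
   variable's type.  */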
1130
1131 static tree
1132 install_var_local (tree var, omp_context *ctx)
1133 {
1134 tree new_var = omp_copy_decl_1 (var, ctx);
1135 insert_decl_map (&ctx->cb, var, new_var);
1136 return new_var;
1137 }
1138
1139 /* Adjust the replacement for DECL in CTX for the new context. This means
1140 copying the DECL_VALUE_EXPR, and fixing up the type. */
1141
1142 static void
1143 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1144 {
1145 tree new_decl, size;
1146
1147 new_decl = lookup_decl (decl, ctx);
1148
1149 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1150
1151 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1152 && DECL_HAS_VALUE_EXPR_P (decl))
1153 {
1154 tree ve = DECL_VALUE_EXPR (decl);
1155 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1156 SET_DECL_VALUE_EXPR (new_decl, ve);
1157 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1158 }
1159
1160 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1161 {
1162 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1163 if (size == error_mark_node)
1164 size = TYPE_SIZE (TREE_TYPE (new_decl));
1165 DECL_SIZE (new_decl) = size;
1166
1167 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1168 if (size == error_mark_node)
1169 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1170 DECL_SIZE_UNIT (new_decl) = size;
1171 }
1172 }
1173
1174 /* The callback for remap_decl. Search all containing contexts for a
1175 mapping of the variable; this avoids having to duplicate the splay
1176 tree ahead of time. We know a mapping doesn't already exist in the
1177 given context. Create new mappings to implement default semantics. */
1178
1179 static tree
1180 omp_copy_decl (tree var, copy_body_data *cb)
1181 {
1182 omp_context *ctx = (omp_context *) cb;
1183 tree new_var;
1184
1185 if (TREE_CODE (var) == LABEL_DECL)
1186 {
1187 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1188 DECL_CONTEXT (new_var) = current_function_decl;
1189 insert_decl_map (&ctx->cb, var, new_var);
1190 return new_var;
1191 }
1192
1193 while (!is_taskreg_ctx (ctx))
1194 {
1195 ctx = ctx->outer;
1196 if (ctx == NULL)
1197 return var;
1198 new_var = maybe_lookup_decl (var, ctx);
1199 if (new_var)
1200 return new_var;
1201 }
1202
1203 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1204 return var;
1205
1206 return error_mark_node;
1207 }
1208
1209
1210 /* Return the parallel region associated with STMT. */
1211
1212 /* Debugging dumps for parallel regions. */
1213 void dump_omp_region (FILE *, struct omp_region *, int);
1214 void debug_omp_region (struct omp_region *);
1215 void debug_all_omp_regions (void);
1216
1217 /* Dump the parallel region tree rooted at REGION. */
1218
1219 void
1220 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1221 {
1222 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1223 gimple_code_name[region->type]);
1224
1225 if (region->inner)
1226 dump_omp_region (file, region->inner, indent + 4);
1227
1228 if (region->cont)
1229 {
1230 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1231 region->cont->index);
1232 }
1233
1234 if (region->exit)
1235 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1236 region->exit->index);
1237 else
1238 fprintf (file, "%*s[no exit marker]\n", indent, "");
1239
1240 if (region->next)
1241 dump_omp_region (file, region->next, indent);
1242 }
1243
1244 DEBUG_FUNCTION void
1245 debug_omp_region (struct omp_region *region)
1246 {
1247 dump_omp_region (stderr, region, 0);
1248 }
1249
1250 DEBUG_FUNCTION void
1251 debug_all_omp_regions (void)
1252 {
1253 dump_omp_region (stderr, root_omp_region, 0);
1254 }
1255
1256
1257 /* Create a new parallel region starting at STMT inside region PARENT. */
1258
1259 static struct omp_region *
1260 new_omp_region (basic_block bb, enum gimple_code type,
1261 struct omp_region *parent)
1262 {
1263 struct omp_region *region = XCNEW (struct omp_region);
1264
1265 region->outer = parent;
1266 region->entry = bb;
1267 region->type = type;
1268
1269 if (parent)
1270 {
1271 /* This is a nested region. Add it to the list of inner
1272 regions in PARENT. */
1273 region->next = parent->inner;
1274 parent->inner = region;
1275 }
1276 else
1277 {
1278 /* This is a toplevel region. Add it to the list of toplevel
1279 regions in ROOT_OMP_REGION. */
1280 region->next = root_omp_region;
1281 root_omp_region = region;
1282 }
1283
1284 return region;
1285 }
1286
1287 /* Release the memory associated with the region tree rooted at REGION. */
1288
1289 static void
1290 free_omp_region_1 (struct omp_region *region)
1291 {
1292 struct omp_region *i, *n;
1293
1294 for (i = region->inner; i ; i = n)
1295 {
1296 n = i->next;
1297 free_omp_region_1 (i);
1298 }
1299
1300 free (region);
1301 }
1302
1303 /* Release the memory for the entire omp region tree. */
1304
1305 void
1306 free_omp_regions (void)
1307 {
1308 struct omp_region *r, *n;
1309 for (r = root_omp_region; r ; r = n)
1310 {
1311 n = r->next;
1312 free_omp_region_1 (r);
1313 }
1314 root_omp_region = NULL;
1315 }
1316
1317
1318 /* Create a new context, with OUTER_CTX being the surrounding context. */
1319
1320 static omp_context *
1321 new_omp_context (gimple stmt, omp_context *outer_ctx)
1322 {
1323 omp_context *ctx = XCNEW (omp_context);
1324
1325 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1326 (splay_tree_value) ctx);
1327 ctx->stmt = stmt;
1328
1329 if (outer_ctx)
1330 {
1331 ctx->outer = outer_ctx;
1332 ctx->cb = outer_ctx->cb;
1333 ctx->cb.block = NULL;
1334 ctx->depth = outer_ctx->depth + 1;
1335 }
1336 else
1337 {
1338 ctx->cb.src_fn = current_function_decl;
1339 ctx->cb.dst_fn = current_function_decl;
1340 ctx->cb.src_node = cgraph_get_node (current_function_decl);
1341 gcc_checking_assert (ctx->cb.src_node);
1342 ctx->cb.dst_node = ctx->cb.src_node;
1343 ctx->cb.src_cfun = cfun;
1344 ctx->cb.copy_decl = omp_copy_decl;
1345 ctx->cb.eh_lp_nr = 0;
1346 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1347 ctx->depth = 1;
1348 }
1349
1350 ctx->cb.decl_map = pointer_map_create ();
1351
1352 return ctx;
1353 }
1354
1355 static gimple_seq maybe_catch_exception (gimple_seq);
1356
1357 /* Finalize task copyfn. */
1358
1359 static void
1360 finalize_task_copyfn (gimple task_stmt)
1361 {
1362 struct function *child_cfun;
1363 tree child_fn;
1364 gimple_seq seq = NULL, new_seq;
1365 gimple bind;
1366
1367 child_fn = gimple_omp_task_copy_fn (task_stmt);
1368 if (child_fn == NULL_TREE)
1369 return;
1370
1371 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1372 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
1373
1374 push_cfun (child_cfun);
1375 bind = gimplify_body (child_fn, false);
1376 gimple_seq_add_stmt (&seq, bind);
1377 new_seq = maybe_catch_exception (seq);
1378 if (new_seq != seq)
1379 {
1380 bind = gimple_build_bind (NULL, new_seq, NULL);
1381 seq = NULL;
1382 gimple_seq_add_stmt (&seq, bind);
1383 }
1384 gimple_set_body (child_fn, seq);
1385 pop_cfun ();
1386
1387 /* Inform the callgraph about the new function. */
1388 cgraph_add_new_function (child_fn, false);
1389 }
1390
1391 /* Destroy an omp_context data structure. Called through the splay tree
1392 value delete callback. */
1393
1394 static void
1395 delete_omp_context (splay_tree_value value)
1396 {
1397 omp_context *ctx = (omp_context *) value;
1398
1399 pointer_map_destroy (ctx->cb.decl_map);
1400
1401 if (ctx->field_map)
1402 splay_tree_delete (ctx->field_map);
1403 if (ctx->sfield_map)
1404 splay_tree_delete (ctx->sfield_map);
1405
1406 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1407 it produces corrupt debug information. */
1408 if (ctx->record_type)
1409 {
1410 tree t;
1411 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1412 DECL_ABSTRACT_ORIGIN (t) = NULL;
1413 }
1414 if (ctx->srecord_type)
1415 {
1416 tree t;
1417 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1418 DECL_ABSTRACT_ORIGIN (t) = NULL;
1419 }
1420
1421 if (is_task_ctx (ctx))
1422 finalize_task_copyfn (ctx->stmt);
1423
1424 XDELETE (ctx);
1425 }
1426
1427 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1428 context. */
1429
1430 static void
1431 fixup_child_record_type (omp_context *ctx)
1432 {
1433 tree f, type = ctx->record_type;
1434
1435 /* ??? It isn't sufficient to just call remap_type here, because
1436 variably_modified_type_p doesn't work the way we expect for
1437 record types. Testing each field for whether it needs remapping
1438 and creating a new record by hand works, however. */
1439 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1440 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1441 break;
1442 if (f)
1443 {
1444 tree name, new_fields = NULL;
1445
1446 type = lang_hooks.types.make_type (RECORD_TYPE);
1447 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1448 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1449 TYPE_DECL, name, type);
1450 TYPE_NAME (type) = name;
1451
1452 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1453 {
1454 tree new_f = copy_node (f);
1455 DECL_CONTEXT (new_f) = type;
1456 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1457 DECL_CHAIN (new_f) = new_fields;
1458 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1459 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1460 &ctx->cb, NULL);
1461 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1462 &ctx->cb, NULL);
1463 new_fields = new_f;
1464
1465 /* Arrange to be able to look up the receiver field
1466 given the sender field. */
1467 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1468 (splay_tree_value) new_f);
1469 }
1470 TYPE_FIELDS (type) = nreverse (new_fields);
1471 layout_type (type);
1472 }
1473
1474 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1475 }
1476
1477 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1478 specified by CLAUSES. */
1479
1480 static void
1481 scan_sharing_clauses (tree clauses, omp_context *ctx)
1482 {
1483 tree c, decl;
1484 bool scan_array_reductions = false;
1485
1486 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1487 {
1488 bool by_ref;
1489
1490 switch (OMP_CLAUSE_CODE (c))
1491 {
1492 case OMP_CLAUSE_PRIVATE:
1493 decl = OMP_CLAUSE_DECL (c);
1494 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1495 goto do_private;
1496 else if (!is_variable_sized (decl))
1497 install_var_local (decl, ctx);
1498 break;
1499
1500 case OMP_CLAUSE_SHARED:
1501 /* Ignore shared directives in teams construct. */
1502 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
1503 break;
1504 gcc_assert (is_taskreg_ctx (ctx));
1505 decl = OMP_CLAUSE_DECL (c);
1506 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1507 || !is_variable_sized (decl));
1508 /* Global variables don't need to be copied,
1509 the receiver side will use them directly. */
1510 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1511 break;
1512 by_ref = use_pointer_for_field (decl, ctx);
1513 if (! TREE_READONLY (decl)
1514 || TREE_ADDRESSABLE (decl)
1515 || by_ref
1516 || is_reference (decl))
1517 {
1518 install_var_field (decl, by_ref, 3, ctx);
1519 install_var_local (decl, ctx);
1520 break;
1521 }
1522 /* We don't need to copy const scalar vars back. */
1523 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1524 goto do_private;
1525
1526 case OMP_CLAUSE_LASTPRIVATE:
1527 /* Let the corresponding firstprivate clause create
1528 the variable. */
1529 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1530 break;
1531 /* FALLTHRU */
1532
1533 case OMP_CLAUSE_FIRSTPRIVATE:
1534 case OMP_CLAUSE_REDUCTION:
1535 case OMP_CLAUSE_LINEAR:
1536 decl = OMP_CLAUSE_DECL (c);
1537 do_private:
1538 if (is_variable_sized (decl))
1539 {
1540 if (is_task_ctx (ctx))
1541 install_var_field (decl, false, 1, ctx);
1542 break;
1543 }
1544 else if (is_taskreg_ctx (ctx))
1545 {
1546 bool global
1547 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1548 by_ref = use_pointer_for_field (decl, NULL);
1549
1550 if (is_task_ctx (ctx)
1551 && (global || by_ref || is_reference (decl)))
1552 {
1553 install_var_field (decl, false, 1, ctx);
1554 if (!global)
1555 install_var_field (decl, by_ref, 2, ctx);
1556 }
1557 else if (!global)
1558 install_var_field (decl, by_ref, 3, ctx);
1559 }
1560 install_var_local (decl, ctx);
1561 break;
1562
1563 case OMP_CLAUSE__LOOPTEMP_:
1564 gcc_assert (is_parallel_ctx (ctx));
1565 decl = OMP_CLAUSE_DECL (c);
1566 install_var_field (decl, false, 3, ctx);
1567 install_var_local (decl, ctx);
1568 break;
1569
1570 case OMP_CLAUSE_COPYPRIVATE:
1571 case OMP_CLAUSE_COPYIN:
1572 decl = OMP_CLAUSE_DECL (c);
1573 by_ref = use_pointer_for_field (decl, NULL);
1574 install_var_field (decl, by_ref, 3, ctx);
1575 break;
1576
1577 case OMP_CLAUSE_DEFAULT:
1578 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1579 break;
1580
1581 case OMP_CLAUSE_FINAL:
1582 case OMP_CLAUSE_IF:
1583 case OMP_CLAUSE_NUM_THREADS:
1584 case OMP_CLAUSE_NUM_TEAMS:
1585 case OMP_CLAUSE_THREAD_LIMIT:
1586 case OMP_CLAUSE_DEVICE:
1587 case OMP_CLAUSE_SCHEDULE:
1588 case OMP_CLAUSE_DIST_SCHEDULE:
1589 case OMP_CLAUSE_DEPEND:
1590 if (ctx->outer)
1591 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1592 break;
1593
1594 case OMP_CLAUSE_TO:
1595 case OMP_CLAUSE_FROM:
1596 case OMP_CLAUSE_MAP:
1597 if (ctx->outer)
1598 scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
1599 decl = OMP_CLAUSE_DECL (c);
1600 /* Global variables with "omp declare target" attribute
1601 don't need to be copied, the receiver side will use them
1602 directly. */
1603 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
1604 && DECL_P (decl)
1605 && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
1606 && lookup_attribute ("omp declare target",
1607 DECL_ATTRIBUTES (decl)))
1608 break;
1609 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
1610 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER)
1611 {
1612 /* Ignore OMP_CLAUSE_MAP_POINTER kind for arrays in
1613 #pragma omp target data, there is nothing to map for
1614 those. */
1615 if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA
1616 && !POINTER_TYPE_P (TREE_TYPE (decl)))
1617 break;
1618 }
1619 if (DECL_P (decl))
1620 {
1621 if (DECL_SIZE (decl)
1622 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
1623 {
1624 tree decl2 = DECL_VALUE_EXPR (decl);
1625 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
1626 decl2 = TREE_OPERAND (decl2, 0);
1627 gcc_assert (DECL_P (decl2));
1628 install_var_field (decl2, true, 3, ctx);
1629 install_var_local (decl2, ctx);
1630 install_var_local (decl, ctx);
1631 }
1632 else
1633 {
1634 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
1635 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
1636 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
1637 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
1638 install_var_field (decl, true, 7, ctx);
1639 else
1640 install_var_field (decl, true, 3, ctx);
1641 if (gimple_omp_target_kind (ctx->stmt)
1642 == GF_OMP_TARGET_KIND_REGION)
1643 install_var_local (decl, ctx);
1644 }
1645 }
1646 else
1647 {
1648 tree base = get_base_address (decl);
1649 tree nc = OMP_CLAUSE_CHAIN (c);
1650 if (DECL_P (base)
1651 && nc != NULL_TREE
1652 && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
1653 && OMP_CLAUSE_DECL (nc) == base
1654 && OMP_CLAUSE_MAP_KIND (nc) == OMP_CLAUSE_MAP_POINTER
1655 && integer_zerop (OMP_CLAUSE_SIZE (nc)))
1656 {
1657 OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
1658 OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
1659 }
1660 else
1661 {
1662 gcc_assert (!splay_tree_lookup (ctx->field_map,
1663 (splay_tree_key) decl));
1664 tree field
1665 = build_decl (OMP_CLAUSE_LOCATION (c),
1666 FIELD_DECL, NULL_TREE, ptr_type_node);
1667 DECL_ALIGN (field) = TYPE_ALIGN (ptr_type_node);
1668 insert_field_into_struct (ctx->record_type, field);
1669 splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
1670 (splay_tree_value) field);
1671 }
1672 }
1673 break;
1674
1675 case OMP_CLAUSE_NOWAIT:
1676 case OMP_CLAUSE_ORDERED:
1677 case OMP_CLAUSE_COLLAPSE:
1678 case OMP_CLAUSE_UNTIED:
1679 case OMP_CLAUSE_MERGEABLE:
1680 case OMP_CLAUSE_PROC_BIND:
1681 case OMP_CLAUSE_SAFELEN:
1682 break;
1683
1684 case OMP_CLAUSE_ALIGNED:
1685 decl = OMP_CLAUSE_DECL (c);
1686 if (is_global_var (decl)
1687 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
1688 install_var_local (decl, ctx);
1689 break;
1690
1691 default:
1692 gcc_unreachable ();
1693 }
1694 }
1695
1696 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1697 {
1698 switch (OMP_CLAUSE_CODE (c))
1699 {
1700 case OMP_CLAUSE_LASTPRIVATE:
1701 /* Let the corresponding firstprivate clause create
1702 the variable. */
1703 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1704 scan_array_reductions = true;
1705 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1706 break;
1707 /* FALLTHRU */
1708
1709 case OMP_CLAUSE_PRIVATE:
1710 case OMP_CLAUSE_FIRSTPRIVATE:
1711 case OMP_CLAUSE_REDUCTION:
1712 case OMP_CLAUSE_LINEAR:
1713 decl = OMP_CLAUSE_DECL (c);
1714 if (is_variable_sized (decl))
1715 install_var_local (decl, ctx);
1716 fixup_remapped_decl (decl, ctx,
1717 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1718 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1719 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1720 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1721 scan_array_reductions = true;
1722 break;
1723
1724 case OMP_CLAUSE_SHARED:
1725 /* Ignore shared directives in teams construct. */
1726 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
1727 break;
1728 decl = OMP_CLAUSE_DECL (c);
1729 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1730 fixup_remapped_decl (decl, ctx, false);
1731 break;
1732
1733 case OMP_CLAUSE_MAP:
1734 if (gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_DATA)
1735 break;
1736 decl = OMP_CLAUSE_DECL (c);
1737 if (DECL_P (decl)
1738 && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
1739 && lookup_attribute ("omp declare target",
1740 DECL_ATTRIBUTES (decl)))
1741 break;
1742 if (DECL_P (decl))
1743 {
1744 if (OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
1745 && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
1746 && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
1747 {
1748 tree new_decl = lookup_decl (decl, ctx);
1749 TREE_TYPE (new_decl)
1750 = remap_type (TREE_TYPE (decl), &ctx->cb);
1751 }
1752 else if (DECL_SIZE (decl)
1753 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
1754 {
1755 tree decl2 = DECL_VALUE_EXPR (decl);
1756 gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
1757 decl2 = TREE_OPERAND (decl2, 0);
1758 gcc_assert (DECL_P (decl2));
1759 fixup_remapped_decl (decl2, ctx, false);
1760 fixup_remapped_decl (decl, ctx, true);
1761 }
1762 else
1763 fixup_remapped_decl (decl, ctx, false);
1764 }
1765 break;
1766
1767 case OMP_CLAUSE_COPYPRIVATE:
1768 case OMP_CLAUSE_COPYIN:
1769 case OMP_CLAUSE_DEFAULT:
1770 case OMP_CLAUSE_IF:
1771 case OMP_CLAUSE_NUM_THREADS:
1772 case OMP_CLAUSE_NUM_TEAMS:
1773 case OMP_CLAUSE_THREAD_LIMIT:
1774 case OMP_CLAUSE_DEVICE:
1775 case OMP_CLAUSE_SCHEDULE:
1776 case OMP_CLAUSE_DIST_SCHEDULE:
1777 case OMP_CLAUSE_NOWAIT:
1778 case OMP_CLAUSE_ORDERED:
1779 case OMP_CLAUSE_COLLAPSE:
1780 case OMP_CLAUSE_UNTIED:
1781 case OMP_CLAUSE_FINAL:
1782 case OMP_CLAUSE_MERGEABLE:
1783 case OMP_CLAUSE_PROC_BIND:
1784 case OMP_CLAUSE_SAFELEN:
1785 case OMP_CLAUSE_ALIGNED:
1786 case OMP_CLAUSE_DEPEND:
1787 case OMP_CLAUSE__LOOPTEMP_:
1788 case OMP_CLAUSE_TO:
1789 case OMP_CLAUSE_FROM:
1790 break;
1791
1792 default:
1793 gcc_unreachable ();
1794 }
1795 }
1796
1797 if (scan_array_reductions)
1798 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1799 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1800 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1801 {
1802 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1803 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1804 }
1805 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1806 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1807 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1808 }
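/* Illustration (not from the original sources): scanning the clauses of

     #pragma omp parallel shared (a) firstprivate (b)

   where A has its address taken and B is a small scalar creates, roughly,

     struct .omp_data_s { int *a; int b; };

   together with field_map entries for A and B and remapped local decls in
   the child context; the sender and receiver references built elsewhere
   then move values through this record.  */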
1809
1810 /* Create a new name for the omp child function. Returns an identifier. */
1811
1812 static GTY(()) unsigned int tmp_ompfn_id_num;
1813
1814 static tree
1815 create_omp_child_function_name (bool task_copy)
1816 {
1817 return (clone_function_name (current_function_decl,
1818 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1819 }
1820
1821 /* Build a decl for the omp child function. It'll not contain a body
1822 yet, just the bare decl. */
1823
1824 static void
1825 create_omp_child_function (omp_context *ctx, bool task_copy)
1826 {
1827 tree decl, type, name, t;
1828
1829 name = create_omp_child_function_name (task_copy);
1830 if (task_copy)
1831 type = build_function_type_list (void_type_node, ptr_type_node,
1832 ptr_type_node, NULL_TREE);
1833 else
1834 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1835
1836 decl = build_decl (gimple_location (ctx->stmt),
1837 FUNCTION_DECL, name, type);
1838
1839 if (!task_copy)
1840 ctx->cb.dst_fn = decl;
1841 else
1842 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1843
1844 TREE_STATIC (decl) = 1;
1845 TREE_USED (decl) = 1;
1846 DECL_ARTIFICIAL (decl) = 1;
1847 DECL_NAMELESS (decl) = 1;
1848 DECL_IGNORED_P (decl) = 0;
1849 TREE_PUBLIC (decl) = 0;
1850 DECL_UNINLINABLE (decl) = 1;
1851 DECL_EXTERNAL (decl) = 0;
1852 DECL_CONTEXT (decl) = NULL_TREE;
1853 DECL_INITIAL (decl) = make_node (BLOCK);
1854 bool target_p = false;
1855 if (lookup_attribute ("omp declare target",
1856 DECL_ATTRIBUTES (current_function_decl)))
1857 target_p = true;
1858 else
1859 {
1860 omp_context *octx;
1861 for (octx = ctx; octx; octx = octx->outer)
1862 if (gimple_code (octx->stmt) == GIMPLE_OMP_TARGET
1863 && gimple_omp_target_kind (octx->stmt)
1864 == GF_OMP_TARGET_KIND_REGION)
1865 {
1866 target_p = true;
1867 break;
1868 }
1869 }
1870 if (target_p)
1871 DECL_ATTRIBUTES (decl)
1872 = tree_cons (get_identifier ("omp declare target"),
1873 NULL_TREE, DECL_ATTRIBUTES (decl));
1874
1875 t = build_decl (DECL_SOURCE_LOCATION (decl),
1876 RESULT_DECL, NULL_TREE, void_type_node);
1877 DECL_ARTIFICIAL (t) = 1;
1878 DECL_IGNORED_P (t) = 1;
1879 DECL_CONTEXT (t) = decl;
1880 DECL_RESULT (decl) = t;
1881
1882 t = build_decl (DECL_SOURCE_LOCATION (decl),
1883 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1884 DECL_ARTIFICIAL (t) = 1;
1885 DECL_NAMELESS (t) = 1;
1886 DECL_ARG_TYPE (t) = ptr_type_node;
1887 DECL_CONTEXT (t) = current_function_decl;
1888 TREE_USED (t) = 1;
1889 DECL_ARGUMENTS (decl) = t;
1890 if (!task_copy)
1891 ctx->receiver_decl = t;
1892 else
1893 {
1894 t = build_decl (DECL_SOURCE_LOCATION (decl),
1895 PARM_DECL, get_identifier (".omp_data_o"),
1896 ptr_type_node);
1897 DECL_ARTIFICIAL (t) = 1;
1898 DECL_NAMELESS (t) = 1;
1899 DECL_ARG_TYPE (t) = ptr_type_node;
1900 DECL_CONTEXT (t) = current_function_decl;
1901 TREE_USED (t) = 1;
1902 TREE_ADDRESSABLE (t) = 1;
1903 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1904 DECL_ARGUMENTS (decl) = t;
1905 }
1906
1907 /* Allocate memory for the function structure. The call to
1908 push_struct_function clobbers CFUN, so we need to restore
1909 it afterward with pop_cfun. */
1910 push_struct_function (decl);
1911 cfun->function_end_locus = gimple_location (ctx->stmt);
1912 pop_cfun ();
1913 }
1914
1915 /* Callback for walk_gimple_seq. Check whether a combined parallel
1916 contains a GIMPLE_OMP_FOR for which gimple_omp_for_combined_into_p is true. */
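/* For example (a sketch, not a verbatim dump): in a combined construct such as

     #pragma omp parallel for
     for (i = 0; i < n; i++)
       body (i);

   the GIMPLE_OMP_FOR nested inside the GIMPLE_OMP_PARALLEL has
   gimple_omp_for_combined_into_p set; this walker merely locates it so that
   scan_omp_parallel below can attach the _looptemp_ clauses needed when the
   combined loop is expanded.  */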
1917
1918 static tree
1919 find_combined_for (gimple_stmt_iterator *gsi_p,
1920 bool *handled_ops_p,
1921 struct walk_stmt_info *wi)
1922 {
1923 gimple stmt = gsi_stmt (*gsi_p);
1924
1925 *handled_ops_p = true;
1926 switch (gimple_code (stmt))
1927 {
1928 WALK_SUBSTMTS;
1929
1930 case GIMPLE_OMP_FOR:
1931 if (gimple_omp_for_combined_into_p (stmt)
1932 && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
1933 {
1934 wi->info = stmt;
1935 return integer_zero_node;
1936 }
1937 break;
1938 default:
1939 break;
1940 }
1941 return NULL;
1942 }
1943
1944 /* Scan an OpenMP parallel directive. */
1945
1946 static void
1947 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1948 {
1949 omp_context *ctx;
1950 tree name;
1951 gimple stmt = gsi_stmt (*gsi);
1952
1953 /* Ignore parallel directives with empty bodies, unless there
1954 are copyin clauses. */
1955 if (optimize > 0
1956 && empty_body_p (gimple_omp_body (stmt))
1957 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1958 OMP_CLAUSE_COPYIN) == NULL)
1959 {
1960 gsi_replace (gsi, gimple_build_nop (), false);
1961 return;
1962 }
1963
1964 if (gimple_omp_parallel_combined_p (stmt))
1965 {
1966 gimple for_stmt;
1967 struct walk_stmt_info wi;
1968
1969 memset (&wi, 0, sizeof (wi));
1970 wi.val_only = true;
1971 walk_gimple_seq (gimple_omp_body (stmt),
1972 find_combined_for, NULL, &wi);
1973 for_stmt = (gimple) wi.info;
1974 if (for_stmt)
1975 {
1976 struct omp_for_data fd;
1977 extract_omp_for_data (for_stmt, &fd, NULL);
1978 /* We need two temporaries with fd.loop.v type (istart/iend)
1979 and then (fd.collapse - 1) temporaries with the same
1980 type for count2 ... countN-1 vars if not constant. */
1981 size_t count = 2, i;
1982 tree type = fd.iter_type;
1983 if (fd.collapse > 1
1984 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
1985 count += fd.collapse - 1;
1986 for (i = 0; i < count; i++)
1987 {
1988 tree temp = create_tmp_var (type, NULL);
1989 tree c = build_omp_clause (UNKNOWN_LOCATION,
1990 OMP_CLAUSE__LOOPTEMP_);
1991 OMP_CLAUSE_DECL (c) = temp;
1992 OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
1993 gimple_omp_parallel_set_clauses (stmt, c);
1994 }
1995 }
1996 }
1997
1998 ctx = new_omp_context (stmt, outer_ctx);
1999 if (taskreg_nesting_level > 1)
2000 ctx->is_nested = true;
2001 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2002 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2003 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2004 name = create_tmp_var_name (".omp_data_s");
2005 name = build_decl (gimple_location (stmt),
2006 TYPE_DECL, name, ctx->record_type);
2007 DECL_ARTIFICIAL (name) = 1;
2008 DECL_NAMELESS (name) = 1;
2009 TYPE_NAME (ctx->record_type) = name;
2010 create_omp_child_function (ctx, false);
2011 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
2012
2013 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
2014 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2015
2016 if (TYPE_FIELDS (ctx->record_type) == NULL)
2017 ctx->record_type = ctx->receiver_decl = NULL;
2018 else
2019 {
2020 layout_type (ctx->record_type);
2021 fixup_child_record_type (ctx);
2022 }
2023 }
2024
2025 /* Scan an OpenMP task directive. */
2026
2027 static void
2028 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
2029 {
2030 omp_context *ctx;
2031 tree name, t;
2032 gimple stmt = gsi_stmt (*gsi);
2033 location_t loc = gimple_location (stmt);
2034
2035 /* Ignore task directives with empty bodies. */
2036 if (optimize > 0
2037 && empty_body_p (gimple_omp_body (stmt)))
2038 {
2039 gsi_replace (gsi, gimple_build_nop (), false);
2040 return;
2041 }
2042
2043 ctx = new_omp_context (stmt, outer_ctx);
2044 if (taskreg_nesting_level > 1)
2045 ctx->is_nested = true;
2046 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2047 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2048 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2049 name = create_tmp_var_name (".omp_data_s");
2050 name = build_decl (gimple_location (stmt),
2051 TYPE_DECL, name, ctx->record_type);
2052 DECL_ARTIFICIAL (name) = 1;
2053 DECL_NAMELESS (name) = 1;
2054 TYPE_NAME (ctx->record_type) = name;
2055 create_omp_child_function (ctx, false);
2056 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
2057
2058 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
2059
2060 if (ctx->srecord_type)
2061 {
2062 name = create_tmp_var_name (".omp_data_a");
2063 name = build_decl (gimple_location (stmt),
2064 TYPE_DECL, name, ctx->srecord_type);
2065 DECL_ARTIFICIAL (name) = 1;
2066 DECL_NAMELESS (name) = 1;
2067 TYPE_NAME (ctx->srecord_type) = name;
2068 create_omp_child_function (ctx, true);
2069 }
2070
2071 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2072
2073 if (TYPE_FIELDS (ctx->record_type) == NULL)
2074 {
2075 ctx->record_type = ctx->receiver_decl = NULL;
2076 t = build_int_cst (long_integer_type_node, 0);
2077 gimple_omp_task_set_arg_size (stmt, t);
2078 t = build_int_cst (long_integer_type_node, 1);
2079 gimple_omp_task_set_arg_align (stmt, t);
2080 }
2081 else
2082 {
2083 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
2084 /* Move VLA fields to the end. */
2085 p = &TYPE_FIELDS (ctx->record_type);
2086 while (*p)
2087 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
2088 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
2089 {
2090 *q = *p;
2091 *p = TREE_CHAIN (*p);
2092 TREE_CHAIN (*q) = NULL_TREE;
2093 q = &TREE_CHAIN (*q);
2094 }
2095 else
2096 p = &DECL_CHAIN (*p);
2097 *p = vla_fields;
2098 layout_type (ctx->record_type);
2099 fixup_child_record_type (ctx);
2100 if (ctx->srecord_type)
2101 layout_type (ctx->srecord_type);
2102 t = fold_convert_loc (loc, long_integer_type_node,
2103 TYPE_SIZE_UNIT (ctx->record_type));
2104 gimple_omp_task_set_arg_size (stmt, t);
2105 t = build_int_cst (long_integer_type_node,
2106 TYPE_ALIGN_UNIT (ctx->record_type));
2107 gimple_omp_task_set_arg_align (stmt, t);
2108 }
2109 }
2110
2111
2112 /* Scan an OpenMP loop directive. */
2113
2114 static void
2115 scan_omp_for (gimple stmt, omp_context *outer_ctx)
2116 {
2117 omp_context *ctx;
2118 size_t i;
2119
2120 ctx = new_omp_context (stmt, outer_ctx);
2121
2122 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
2123
2124 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
2125 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
2126 {
2127 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
2128 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
2129 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
2130 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
2131 }
2132 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2133 }
2134
2135 /* Scan an OpenMP sections directive. */
2136
2137 static void
2138 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
2139 {
2140 omp_context *ctx;
2141
2142 ctx = new_omp_context (stmt, outer_ctx);
2143 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
2144 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2145 }
2146
2147 /* Scan an OpenMP single directive. */
2148
2149 static void
2150 scan_omp_single (gimple stmt, omp_context *outer_ctx)
2151 {
2152 omp_context *ctx;
2153 tree name;
2154
2155 ctx = new_omp_context (stmt, outer_ctx);
2156 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2157 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2158 name = create_tmp_var_name (".omp_copy_s");
2159 name = build_decl (gimple_location (stmt),
2160 TYPE_DECL, name, ctx->record_type);
2161 TYPE_NAME (ctx->record_type) = name;
2162
2163 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
2164 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2165
2166 if (TYPE_FIELDS (ctx->record_type) == NULL)
2167 ctx->record_type = NULL;
2168 else
2169 layout_type (ctx->record_type);
2170 }
2171
2172 /* Scan an OpenMP target{, data, update} directive. */
2173
2174 static void
2175 scan_omp_target (gimple stmt, omp_context *outer_ctx)
2176 {
2177 omp_context *ctx;
2178 tree name;
2179 int kind = gimple_omp_target_kind (stmt);
2180
2181 ctx = new_omp_context (stmt, outer_ctx);
2182 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
2183 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
2184 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
2185 name = create_tmp_var_name (".omp_data_t");
2186 name = build_decl (gimple_location (stmt),
2187 TYPE_DECL, name, ctx->record_type);
2188 DECL_ARTIFICIAL (name) = 1;
2189 DECL_NAMELESS (name) = 1;
2190 TYPE_NAME (ctx->record_type) = name;
2191 if (kind == GF_OMP_TARGET_KIND_REGION)
2192 {
2193 create_omp_child_function (ctx, false);
2194 gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
2195 }
2196
2197 scan_sharing_clauses (gimple_omp_target_clauses (stmt), ctx);
2198 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2199
2200 if (TYPE_FIELDS (ctx->record_type) == NULL)
2201 ctx->record_type = ctx->receiver_decl = NULL;
2202 else
2203 {
2204 TYPE_FIELDS (ctx->record_type)
2205 = nreverse (TYPE_FIELDS (ctx->record_type));
2206 #ifdef ENABLE_CHECKING
2207 tree field;
2208 unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
2209 for (field = TYPE_FIELDS (ctx->record_type);
2210 field;
2211 field = DECL_CHAIN (field))
2212 gcc_assert (DECL_ALIGN (field) == align);
2213 #endif
2214 layout_type (ctx->record_type);
2215 if (kind == GF_OMP_TARGET_KIND_REGION)
2216 fixup_child_record_type (ctx);
2217 }
2218 }
2219
2220 /* Scan an OpenMP teams directive. */
2221
2222 static void
2223 scan_omp_teams (gimple stmt, omp_context *outer_ctx)
2224 {
2225 omp_context *ctx = new_omp_context (stmt, outer_ctx);
2226 scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
2227 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2228 }
2229
2230 /* Check OpenMP nesting restrictions. */
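/* For instance (a sketch of one of the checks below): a worksharing loop
   closely nested inside another worksharing region with no intervening
   parallel is rejected with "work-sharing region may not be closely nested
   inside of work-sharing, critical, ordered, master or explicit task region":

     #pragma omp for
     for (i = 0; i < n; i++)
       {
         #pragma omp for
         for (j = 0; j < m; j++)
           f (i, j);
       }  */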
2231 static bool
2232 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
2233 {
2234 if (ctx != NULL)
2235 {
2236 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2237 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_KIND_SIMD)
2238 {
2239 error_at (gimple_location (stmt),
2240 "OpenMP constructs may not be nested inside simd region");
2241 return false;
2242 }
2243 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
2244 {
2245 if ((gimple_code (stmt) != GIMPLE_OMP_FOR
2246 || (gimple_omp_for_kind (stmt)
2247 != GF_OMP_FOR_KIND_DISTRIBUTE))
2248 && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
2249 {
2250 error_at (gimple_location (stmt),
2251 "only distribute or parallel constructs are allowed to "
2252 "be closely nested inside teams construct");
2253 return false;
2254 }
2255 }
2256 }
2257 switch (gimple_code (stmt))
2258 {
2259 case GIMPLE_OMP_FOR:
2260 if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_KIND_SIMD)
2261 return true;
2262 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
2263 {
2264 if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
2265 {
2266 error_at (gimple_location (stmt),
2267 "distribute construct must be closely nested inside "
2268 "teams construct");
2269 return false;
2270 }
2271 return true;
2272 }
2273 /* FALLTHRU */
2274 case GIMPLE_CALL:
2275 if (is_gimple_call (stmt)
2276 && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2277 == BUILT_IN_GOMP_CANCEL
2278 || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2279 == BUILT_IN_GOMP_CANCELLATION_POINT))
2280 {
2281 const char *bad = NULL;
2282 const char *kind = NULL;
2283 if (ctx == NULL)
2284 {
2285 error_at (gimple_location (stmt), "orphaned %qs construct",
2286 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2287 == BUILT_IN_GOMP_CANCEL
2288 ? "#pragma omp cancel"
2289 : "#pragma omp cancellation point");
2290 return false;
2291 }
2292 switch (host_integerp (gimple_call_arg (stmt, 0), 0)
2293 ? tree_low_cst (gimple_call_arg (stmt, 0), 0)
2294 : 0)
2295 {
2296 case 1:
2297 if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
2298 bad = "#pragma omp parallel";
2299 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2300 == BUILT_IN_GOMP_CANCEL
2301 && !integer_zerop (gimple_call_arg (stmt, 1)))
2302 ctx->cancellable = true;
2303 kind = "parallel";
2304 break;
2305 case 2:
2306 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2307 || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
2308 bad = "#pragma omp for";
2309 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2310 == BUILT_IN_GOMP_CANCEL
2311 && !integer_zerop (gimple_call_arg (stmt, 1)))
2312 {
2313 ctx->cancellable = true;
2314 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2315 OMP_CLAUSE_NOWAIT))
2316 warning_at (gimple_location (stmt), 0,
2317 "%<#pragma omp cancel for%> inside "
2318 "%<nowait%> for construct");
2319 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2320 OMP_CLAUSE_ORDERED))
2321 warning_at (gimple_location (stmt), 0,
2322 "%<#pragma omp cancel for%> inside "
2323 "%<ordered%> for construct");
2324 }
2325 kind = "for";
2326 break;
2327 case 4:
2328 if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
2329 && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
2330 bad = "#pragma omp sections";
2331 else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2332 == BUILT_IN_GOMP_CANCEL
2333 && !integer_zerop (gimple_call_arg (stmt, 1)))
2334 {
2335 if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
2336 {
2337 ctx->cancellable = true;
2338 if (find_omp_clause (gimple_omp_sections_clauses
2339 (ctx->stmt),
2340 OMP_CLAUSE_NOWAIT))
2341 warning_at (gimple_location (stmt), 0,
2342 "%<#pragma omp cancel sections%> inside "
2343 "%<nowait%> sections construct");
2344 }
2345 else
2346 {
2347 gcc_assert (ctx->outer
2348 && gimple_code (ctx->outer->stmt)
2349 == GIMPLE_OMP_SECTIONS);
2350 ctx->outer->cancellable = true;
2351 if (find_omp_clause (gimple_omp_sections_clauses
2352 (ctx->outer->stmt),
2353 OMP_CLAUSE_NOWAIT))
2354 warning_at (gimple_location (stmt), 0,
2355 "%<#pragma omp cancel sections%> inside "
2356 "%<nowait%> sections construct");
2357 }
2358 }
2359 kind = "sections";
2360 break;
2361 case 8:
2362 if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
2363 bad = "#pragma omp task";
2364 else
2365 ctx->cancellable = true;
2366 kind = "taskgroup";
2367 break;
2368 default:
2369 error_at (gimple_location (stmt), "invalid arguments");
2370 return false;
2371 }
2372 if (bad)
2373 {
2374 error_at (gimple_location (stmt),
2375 "%<%s %s%> construct not closely nested inside of %qs",
2376 DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2377 == BUILT_IN_GOMP_CANCEL
2378 ? "#pragma omp cancel"
2379 : "#pragma omp cancellation point", kind, bad);
2380 return false;
2381 }
2382 }
2383 /* FALLTHRU */
2384 case GIMPLE_OMP_SECTIONS:
2385 case GIMPLE_OMP_SINGLE:
2386 for (; ctx != NULL; ctx = ctx->outer)
2387 switch (gimple_code (ctx->stmt))
2388 {
2389 case GIMPLE_OMP_FOR:
2390 case GIMPLE_OMP_SECTIONS:
2391 case GIMPLE_OMP_SINGLE:
2392 case GIMPLE_OMP_ORDERED:
2393 case GIMPLE_OMP_MASTER:
2394 case GIMPLE_OMP_TASK:
2395 case GIMPLE_OMP_CRITICAL:
2396 if (is_gimple_call (stmt))
2397 {
2398 if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
2399 != BUILT_IN_GOMP_BARRIER)
2400 return true;
2401 error_at (gimple_location (stmt),
2402 "barrier region may not be closely nested inside "
2403 "of work-sharing, critical, ordered, master or "
2404 "explicit task region");
2405 return false;
2406 }
2407 error_at (gimple_location (stmt),
2408 "work-sharing region may not be closely nested inside "
2409 "of work-sharing, critical, ordered, master or explicit "
2410 "task region");
2411 return false;
2412 case GIMPLE_OMP_PARALLEL:
2413 return true;
2414 default:
2415 break;
2416 }
2417 break;
2418 case GIMPLE_OMP_MASTER:
2419 for (; ctx != NULL; ctx = ctx->outer)
2420 switch (gimple_code (ctx->stmt))
2421 {
2422 case GIMPLE_OMP_FOR:
2423 case GIMPLE_OMP_SECTIONS:
2424 case GIMPLE_OMP_SINGLE:
2425 case GIMPLE_OMP_TASK:
2426 error_at (gimple_location (stmt),
2427 "master region may not be closely nested inside "
2428 "of work-sharing or explicit task region");
2429 return false;
2430 case GIMPLE_OMP_PARALLEL:
2431 return true;
2432 default:
2433 break;
2434 }
2435 break;
2436 case GIMPLE_OMP_ORDERED:
2437 for (; ctx != NULL; ctx = ctx->outer)
2438 switch (gimple_code (ctx->stmt))
2439 {
2440 case GIMPLE_OMP_CRITICAL:
2441 case GIMPLE_OMP_TASK:
2442 error_at (gimple_location (stmt),
2443 "ordered region may not be closely nested inside "
2444 "of critical or explicit task region");
2445 return false;
2446 case GIMPLE_OMP_FOR:
2447 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2448 OMP_CLAUSE_ORDERED) == NULL)
2449 {
2450 error_at (gimple_location (stmt),
2451 "ordered region must be closely nested inside "
2452 "a loop region with an ordered clause");
2453 return false;
2454 }
2455 return true;
2456 case GIMPLE_OMP_PARALLEL:
2457 error_at (gimple_location (stmt),
2458 "ordered region must be closely nested inside "
2459 "a loop region with an ordered clause");
2460 return false;
2461 default:
2462 break;
2463 }
2464 break;
2465 case GIMPLE_OMP_CRITICAL:
2466 for (; ctx != NULL; ctx = ctx->outer)
2467 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
2468 && (gimple_omp_critical_name (stmt)
2469 == gimple_omp_critical_name (ctx->stmt)))
2470 {
2471 error_at (gimple_location (stmt),
2472 "critical region may not be nested inside a critical "
2473 "region with the same name");
2474 return false;
2475 }
2476 break;
2477 case GIMPLE_OMP_TEAMS:
2478 if (ctx == NULL
2479 || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
2480 || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
2481 {
2482 error_at (gimple_location (stmt),
2483 "teams construct not closely nested inside of target "
2484 "region");
2485 return false;
2486 }
2487 break;
2488 default:
2489 break;
2490 }
2491 return true;
2492 }
2493
2494
2495 /* Helper function for scan_omp.
2496
2497 Callback for walk_tree or operators in walk_gimple_stmt used to
2498 scan for OpenMP directives in TP. */
2499
2500 static tree
2501 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
2502 {
2503 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
2504 omp_context *ctx = (omp_context *) wi->info;
2505 tree t = *tp;
2506
2507 switch (TREE_CODE (t))
2508 {
2509 case VAR_DECL:
2510 case PARM_DECL:
2511 case LABEL_DECL:
2512 case RESULT_DECL:
2513 if (ctx)
2514 *tp = remap_decl (t, &ctx->cb);
2515 break;
2516
2517 default:
2518 if (ctx && TYPE_P (t))
2519 *tp = remap_type (t, &ctx->cb);
2520 else if (!DECL_P (t))
2521 {
2522 *walk_subtrees = 1;
2523 if (ctx)
2524 {
2525 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
2526 if (tem != TREE_TYPE (t))
2527 {
2528 if (TREE_CODE (t) == INTEGER_CST)
2529 *tp = build_int_cst_wide (tem,
2530 TREE_INT_CST_LOW (t),
2531 TREE_INT_CST_HIGH (t));
2532 else
2533 TREE_TYPE (t) = tem;
2534 }
2535 }
2536 }
2537 break;
2538 }
2539
2540 return NULL_TREE;
2541 }
2542
2543 /* Return true if FNDECL is a setjmp or a longjmp. */
2544
2545 static bool
2546 setjmp_or_longjmp_p (const_tree fndecl)
2547 {
2548 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2549 && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
2550 || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
2551 return true;
2552
2553 tree declname = DECL_NAME (fndecl);
2554 if (!declname)
2555 return false;
2556 const char *name = IDENTIFIER_POINTER (declname);
2557 return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
2558 }
2559
2560
2561 /* Helper function for scan_omp.
2562
2563 Callback for walk_gimple_stmt used to scan for OpenMP directives in
2564 the current statement in GSI. */
2565
2566 static tree
2567 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
2568 struct walk_stmt_info *wi)
2569 {
2570 gimple stmt = gsi_stmt (*gsi);
2571 omp_context *ctx = (omp_context *) wi->info;
2572
2573 if (gimple_has_location (stmt))
2574 input_location = gimple_location (stmt);
2575
2576 /* Check the OpenMP nesting restrictions. */
2577 bool remove = false;
2578 if (is_gimple_omp (stmt))
2579 remove = !check_omp_nesting_restrictions (stmt, ctx);
2580 else if (is_gimple_call (stmt))
2581 {
2582 tree fndecl = gimple_call_fndecl (stmt);
2583 if (fndecl)
2584 {
2585 if (setjmp_or_longjmp_p (fndecl)
2586 && ctx
2587 && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2588 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_KIND_SIMD)
2589 {
2590 remove = true;
2591 error_at (gimple_location (stmt),
2592 "setjmp/longjmp inside simd construct");
2593 }
2594 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2595 switch (DECL_FUNCTION_CODE (fndecl))
2596 {
2597 case BUILT_IN_GOMP_BARRIER:
2598 case BUILT_IN_GOMP_CANCEL:
2599 case BUILT_IN_GOMP_CANCELLATION_POINT:
2600 case BUILT_IN_GOMP_TASKYIELD:
2601 case BUILT_IN_GOMP_TASKWAIT:
2602 case BUILT_IN_GOMP_TASKGROUP_START:
2603 case BUILT_IN_GOMP_TASKGROUP_END:
2604 remove = !check_omp_nesting_restrictions (stmt, ctx);
2605 break;
2606 default:
2607 break;
2608 }
2609 }
2610 }
2611 if (remove)
2612 {
2613 stmt = gimple_build_nop ();
2614 gsi_replace (gsi, stmt, false);
2615 }
2616
2617 *handled_ops_p = true;
2618
2619 switch (gimple_code (stmt))
2620 {
2621 case GIMPLE_OMP_PARALLEL:
2622 taskreg_nesting_level++;
2623 scan_omp_parallel (gsi, ctx);
2624 taskreg_nesting_level--;
2625 break;
2626
2627 case GIMPLE_OMP_TASK:
2628 taskreg_nesting_level++;
2629 scan_omp_task (gsi, ctx);
2630 taskreg_nesting_level--;
2631 break;
2632
2633 case GIMPLE_OMP_FOR:
2634 scan_omp_for (stmt, ctx);
2635 break;
2636
2637 case GIMPLE_OMP_SECTIONS:
2638 scan_omp_sections (stmt, ctx);
2639 break;
2640
2641 case GIMPLE_OMP_SINGLE:
2642 scan_omp_single (stmt, ctx);
2643 break;
2644
2645 case GIMPLE_OMP_SECTION:
2646 case GIMPLE_OMP_MASTER:
2647 case GIMPLE_OMP_TASKGROUP:
2648 case GIMPLE_OMP_ORDERED:
2649 case GIMPLE_OMP_CRITICAL:
2650 ctx = new_omp_context (stmt, ctx);
2651 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2652 break;
2653
2654 case GIMPLE_OMP_TARGET:
2655 scan_omp_target (stmt, ctx);
2656 break;
2657
2658 case GIMPLE_OMP_TEAMS:
2659 scan_omp_teams (stmt, ctx);
2660 break;
2661
2662 case GIMPLE_BIND:
2663 {
2664 tree var;
2665
2666 *handled_ops_p = false;
2667 if (ctx)
2668 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2669 insert_decl_map (&ctx->cb, var, var);
2670 }
2671 break;
2672 default:
2673 *handled_ops_p = false;
2674 break;
2675 }
2676
2677 return NULL_TREE;
2678 }
2679
2680
2681 /* Scan all the statements starting at the current statement. CTX
2682 contains context information about the OpenMP directives and
2683 clauses found during the scan. */
2684
2685 static void
2686 scan_omp (gimple_seq *body_p, omp_context *ctx)
2687 {
2688 location_t saved_location;
2689 struct walk_stmt_info wi;
2690
2691 memset (&wi, 0, sizeof (wi));
2692 wi.info = ctx;
2693 wi.want_locations = true;
2694
2695 saved_location = input_location;
2696 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2697 input_location = saved_location;
2698 }
2699 \f
2700 /* Re-gimplification and code generation routines. */
2701
2702 /* Build a call to GOMP_barrier. */
2703
2704 static gimple
2705 build_omp_barrier (tree lhs)
2706 {
2707 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
2708 : BUILT_IN_GOMP_BARRIER);
2709 gimple g = gimple_build_call (fndecl, 0);
2710 if (lhs)
2711 gimple_call_set_lhs (g, lhs);
2712 return g;
2713 }
2714
2715 /* If a context was created for STMT when it was scanned, return it. */
2716
2717 static omp_context *
2718 maybe_lookup_ctx (gimple stmt)
2719 {
2720 splay_tree_node n;
2721 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2722 return n ? (omp_context *) n->value : NULL;
2723 }
2724
2725
2726 /* Find the mapping for DECL in CTX or the immediately enclosing
2727 context that has a mapping for DECL.
2728
2729 If CTX is a nested parallel directive, we may have to use the decl
2730 mappings created in CTX's parent context. Suppose that we have the
2731 following parallel nesting (variable UIDs shown for clarity):
2732
2733 iD.1562 = 0;
2734 #omp parallel shared(iD.1562) -> outer parallel
2735 iD.1562 = iD.1562 + 1;
2736
2737 #omp parallel shared (iD.1562) -> inner parallel
2738 iD.1562 = iD.1562 - 1;
2739
2740 Each parallel structure will create a distinct .omp_data_s structure
2741 for copying iD.1562 in/out of the directive:
2742
2743 outer parallel .omp_data_s.1.i -> iD.1562
2744 inner parallel .omp_data_s.2.i -> iD.1562
2745
2746 A shared variable mapping will produce a copy-out operation before
2747 the parallel directive and a copy-in operation after it. So, in
2748 this case we would have:
2749
2750 iD.1562 = 0;
2751 .omp_data_o.1.i = iD.1562;
2752 #omp parallel shared(iD.1562) -> outer parallel
2753 .omp_data_i.1 = &.omp_data_o.1
2754 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2755
2756 .omp_data_o.2.i = iD.1562; -> **
2757 #omp parallel shared(iD.1562) -> inner parallel
2758 .omp_data_i.2 = &.omp_data_o.2
2759 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2760
2761
2762 ** This is a problem. The symbol iD.1562 cannot be referenced
2763 inside the body of the outer parallel region. But since we are
2764 emitting this copy operation while expanding the inner parallel
2765 directive, we need to access the CTX structure of the outer
2766 parallel directive to get the correct mapping:
2767
2768 .omp_data_o.2.i = .omp_data_i.1->i
2769
2770 Since there may be other workshare or parallel directives enclosing
2771 the parallel directive, it may be necessary to walk up the context
2772 parent chain. This is not a problem in general because nested
2773 parallelism happens only rarely. */
2774
2775 static tree
2776 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2777 {
2778 tree t;
2779 omp_context *up;
2780
2781 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2782 t = maybe_lookup_decl (decl, up);
2783
2784 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2785
2786 return t ? t : decl;
2787 }
2788
2789
2790 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2791 in outer contexts. */
2792
2793 static tree
2794 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2795 {
2796 tree t = NULL;
2797 omp_context *up;
2798
2799 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2800 t = maybe_lookup_decl (decl, up);
2801
2802 return t ? t : decl;
2803 }
2804
2805
2806 /* Construct the initialization value for reduction CLAUSE. */
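/* Roughly, each private reduction copy is seeded with the identity element
   of its operator; e.g. for an int variable x:

     reduction(+:x)    ->  x = 0
     reduction(*:x)    ->  x = 1
     reduction(&:x)    ->  x = -1     (all bits set)
     reduction(max:x)  ->  x = INT_MIN
     reduction(min:x)  ->  x = INT_MAX  */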
2807
2808 tree
2809 omp_reduction_init (tree clause, tree type)
2810 {
2811 location_t loc = OMP_CLAUSE_LOCATION (clause);
2812 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2813 {
2814 case PLUS_EXPR:
2815 case MINUS_EXPR:
2816 case BIT_IOR_EXPR:
2817 case BIT_XOR_EXPR:
2818 case TRUTH_OR_EXPR:
2819 case TRUTH_ORIF_EXPR:
2820 case TRUTH_XOR_EXPR:
2821 case NE_EXPR:
2822 return build_zero_cst (type);
2823
2824 case MULT_EXPR:
2825 case TRUTH_AND_EXPR:
2826 case TRUTH_ANDIF_EXPR:
2827 case EQ_EXPR:
2828 return fold_convert_loc (loc, type, integer_one_node);
2829
2830 case BIT_AND_EXPR:
2831 return fold_convert_loc (loc, type, integer_minus_one_node);
2832
2833 case MAX_EXPR:
2834 if (SCALAR_FLOAT_TYPE_P (type))
2835 {
2836 REAL_VALUE_TYPE max, min;
2837 if (HONOR_INFINITIES (TYPE_MODE (type)))
2838 {
2839 real_inf (&max);
2840 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2841 }
2842 else
2843 real_maxval (&min, 1, TYPE_MODE (type));
2844 return build_real (type, min);
2845 }
2846 else
2847 {
2848 gcc_assert (INTEGRAL_TYPE_P (type));
2849 return TYPE_MIN_VALUE (type);
2850 }
2851
2852 case MIN_EXPR:
2853 if (SCALAR_FLOAT_TYPE_P (type))
2854 {
2855 REAL_VALUE_TYPE max;
2856 if (HONOR_INFINITIES (TYPE_MODE (type)))
2857 real_inf (&max);
2858 else
2859 real_maxval (&max, 0, TYPE_MODE (type));
2860 return build_real (type, max);
2861 }
2862 else
2863 {
2864 gcc_assert (INTEGRAL_TYPE_P (type));
2865 return TYPE_MAX_VALUE (type);
2866 }
2867
2868 default:
2869 gcc_unreachable ();
2870 }
2871 }
2872
2873 /* Return alignment to be assumed for var in CLAUSE, which should be
2874 OMP_CLAUSE_ALIGNED. */
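/* For example, "#pragma omp simd aligned (p : 32)" yields 32 here.  With no
   explicit alignment the loop below returns the largest unit alignment among
   the target's preferred SIMD vector types, so on a typical x86-64 target
   this is on the order of 16 or 32 bytes depending on the enabled ISA
   (illustrative only; the exact value is target dependent).  */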
2875
2876 static tree
2877 omp_clause_aligned_alignment (tree clause)
2878 {
2879 if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
2880 return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);
2881
2882 /* Otherwise return implementation defined alignment. */
2883 unsigned int al = 1;
2884 enum machine_mode mode, vmode;
2885 int vs = targetm.vectorize.autovectorize_vector_sizes ();
2886 if (vs)
2887 vs = 1 << floor_log2 (vs);
2888 static enum mode_class classes[]
2889 = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
2890 for (int i = 0; i < 4; i += 2)
2891 for (mode = GET_CLASS_NARROWEST_MODE (classes[i]);
2892 mode != VOIDmode;
2893 mode = GET_MODE_WIDER_MODE (mode))
2894 {
2895 vmode = targetm.vectorize.preferred_simd_mode (mode);
2896 if (GET_MODE_CLASS (vmode) != classes[i + 1])
2897 continue;
2898 while (vs
2899 && GET_MODE_SIZE (vmode) < vs
2900 && GET_MODE_2XWIDER_MODE (vmode) != VOIDmode)
2901 vmode = GET_MODE_2XWIDER_MODE (vmode);
2902
2903 tree type = lang_hooks.types.type_for_mode (mode, 1);
2904 if (type == NULL_TREE || TYPE_MODE (type) != mode)
2905 continue;
2906 type = build_vector_type (type, GET_MODE_SIZE (vmode)
2907 / GET_MODE_SIZE (mode));
2908 if (TYPE_MODE (type) != vmode)
2909 continue;
2910 if (TYPE_ALIGN_UNIT (type) > al)
2911 al = TYPE_ALIGN_UNIT (type);
2912 }
2913 return build_int_cst (integer_type_node, al);
2914 }
2915
2916 /* Return maximum possible vectorization factor for the target. */
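/* Illustrative only: with vectorization enabled this is the widest
   autovectorization vector size in bytes, i.e. the number of char-sized
   lanes (e.g. 16 with SSE2, 32 with AVX2); at -O0 or -Og it is simply 1.  */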
2917
2918 static int
2919 omp_max_vf (void)
2920 {
2921 if (!optimize
2922 || optimize_debug
2923 || (!flag_tree_loop_vectorize
2924 && (global_options_set.x_flag_tree_loop_vectorize
2925 || global_options_set.x_flag_tree_vectorize)))
2926 return 1;
2927
2928 int vs = targetm.vectorize.autovectorize_vector_sizes ();
2929 if (vs)
2930 {
2931 vs = 1 << floor_log2 (vs);
2932 return vs;
2933 }
2934 enum machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
2935 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
2936 return GET_MODE_NUNITS (vqimode);
2937 return 1;
2938 }
2939
2940 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
2941 privatization. */
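/* The idea, roughly: a privatized scalar X in a simd loop gets a backing
   "omp simd array" X_arr[max_vf] (the name X_arr is invented here).  Inside
   the loop body X is rewritten to the per-lane element X_arr[LANE], where
   LANE comes from the GOMP_SIMD_LANE internal call, while the initialization
   and the reduction/destructor epilogues emitted around the loop iterate IDX
   over all lanes and operate on X_arr[IDX].  */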
2942
2943 static bool
2944 lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
2945 tree &idx, tree &lane, tree &ivar, tree &lvar)
2946 {
2947 if (max_vf == 0)
2948 {
2949 max_vf = omp_max_vf ();
2950 if (max_vf > 1)
2951 {
2952 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2953 OMP_CLAUSE_SAFELEN);
2954 if (c
2955 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c), max_vf) == -1)
2956 max_vf = tree_low_cst (OMP_CLAUSE_SAFELEN_EXPR (c), 0);
2957 }
2958 if (max_vf > 1)
2959 {
2960 idx = create_tmp_var (unsigned_type_node, NULL);
2961 lane = create_tmp_var (unsigned_type_node, NULL);
2962 }
2963 }
2964 if (max_vf == 1)
2965 return false;
2966
2967 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
2968 tree avar = create_tmp_var_raw (atype, NULL);
2969 if (TREE_ADDRESSABLE (new_var))
2970 TREE_ADDRESSABLE (avar) = 1;
2971 DECL_ATTRIBUTES (avar)
2972 = tree_cons (get_identifier ("omp simd array"), NULL,
2973 DECL_ATTRIBUTES (avar));
2974 gimple_add_tmp_var (avar);
2975 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
2976 NULL_TREE, NULL_TREE);
2977 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
2978 NULL_TREE, NULL_TREE);
2979 if (DECL_P (new_var))
2980 {
2981 SET_DECL_VALUE_EXPR (new_var, lvar);
2982 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2983 }
2984 return true;
2985 }
2986
2987 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2988 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2989 private variables. Initialization statements go in ILIST, while calls
2990 to destructors go in DLIST. */
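/* As a rough illustration (the private copy's name is invented), for

     #pragma omp parallel firstprivate (a)

   the receiver side code appended to ILIST looks something like

     a_priv = .omp_data_i->a;

   i.e. the private copy is initialized from the field filled in by the
   sender, and for a non-POD C++ type the language hook supplies a copy
   constructor call instead of the plain assignment.  */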
2991
2992 static void
2993 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2994 omp_context *ctx, struct omp_for_data *fd)
2995 {
2996 tree c, dtor, copyin_seq, x, ptr;
2997 bool copyin_by_ref = false;
2998 bool lastprivate_firstprivate = false;
2999 bool reduction_omp_orig_ref = false;
3000 int pass;
3001 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3002 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_KIND_SIMD);
3003 int max_vf = 0;
3004 tree lane = NULL_TREE, idx = NULL_TREE;
3005 tree ivar = NULL_TREE, lvar = NULL_TREE;
3006 gimple_seq llist[2] = { NULL, NULL };
3007
3008 copyin_seq = NULL;
3009
3010 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
3011 with data sharing clauses referencing variable sized vars. That
3012 is unnecessarily hard to support and very unlikely to result in
3013 vectorized code anyway. */
3014 if (is_simd)
3015 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3016 switch (OMP_CLAUSE_CODE (c))
3017 {
3018 case OMP_CLAUSE_REDUCTION:
3019 case OMP_CLAUSE_PRIVATE:
3020 case OMP_CLAUSE_FIRSTPRIVATE:
3021 case OMP_CLAUSE_LASTPRIVATE:
3022 case OMP_CLAUSE_LINEAR:
3023 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
3024 max_vf = 1;
3025 break;
3026 default:
3027 continue;
3028 }
3029
3030 /* Do all the fixed sized types in the first pass, and the variable sized
3031 types in the second pass. This makes sure that the scalar arguments to
3032 the variable sized types are processed before we use them in the
3033 variable sized operations. */
3034 for (pass = 0; pass < 2; ++pass)
3035 {
3036 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3037 {
3038 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
3039 tree var, new_var;
3040 bool by_ref;
3041 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3042
3043 switch (c_kind)
3044 {
3045 case OMP_CLAUSE_PRIVATE:
3046 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
3047 continue;
3048 break;
3049 case OMP_CLAUSE_SHARED:
3050 /* Ignore shared directives in teams construct. */
3051 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3052 continue;
3053 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
3054 {
3055 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
3056 continue;
3057 }
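/* FALLTHRU */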
3058 case OMP_CLAUSE_FIRSTPRIVATE:
3059 case OMP_CLAUSE_COPYIN:
3060 case OMP_CLAUSE_LINEAR:
3061 break;
3062 case OMP_CLAUSE_REDUCTION:
3063 if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
3064 reduction_omp_orig_ref = true;
3065 break;
3066 case OMP_CLAUSE__LOOPTEMP_:
3067 /* Handle _looptemp_ clauses only on parallel. */
3068 if (fd)
3069 continue;
3070 break;
3071 case OMP_CLAUSE_LASTPRIVATE:
3072 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3073 {
3074 lastprivate_firstprivate = true;
3075 if (pass != 0)
3076 continue;
3077 }
3078 break;
3079 case OMP_CLAUSE_ALIGNED:
3080 if (pass == 0)
3081 continue;
3082 var = OMP_CLAUSE_DECL (c);
3083 if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
3084 && !is_global_var (var))
3085 {
3086 new_var = maybe_lookup_decl (var, ctx);
3087 if (new_var == NULL_TREE)
3088 new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
3089 x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3090 x = build_call_expr_loc (clause_loc, x, 2, new_var,
3091 omp_clause_aligned_alignment (c));
3092 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3093 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3094 gimplify_and_add (x, ilist);
3095 }
3096 else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
3097 && is_global_var (var))
3098 {
3099 tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
3100 new_var = lookup_decl (var, ctx);
3101 t = maybe_lookup_decl_in_outer_ctx (var, ctx);
3102 t = build_fold_addr_expr_loc (clause_loc, t);
3103 t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
3104 t = build_call_expr_loc (clause_loc, t2, 2, t,
3105 omp_clause_aligned_alignment (c));
3106 t = fold_convert_loc (clause_loc, ptype, t);
3107 x = create_tmp_var (ptype, NULL);
3108 t = build2 (MODIFY_EXPR, ptype, x, t);
3109 gimplify_and_add (t, ilist);
3110 t = build_simple_mem_ref_loc (clause_loc, x);
3111 SET_DECL_VALUE_EXPR (new_var, t);
3112 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3113 }
3114 continue;
3115 default:
3116 continue;
3117 }
3118
3119 new_var = var = OMP_CLAUSE_DECL (c);
3120 if (c_kind != OMP_CLAUSE_COPYIN)
3121 new_var = lookup_decl (var, ctx);
3122
3123 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
3124 {
3125 if (pass != 0)
3126 continue;
3127 }
3128 else if (is_variable_sized (var))
3129 {
3130 /* For variable sized types, we need to allocate the
3131 actual storage here. Call alloca and store the
3132 result in the pointer decl that we created elsewhere. */
3133 if (pass == 0)
3134 continue;
3135
3136 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
3137 {
3138 gimple stmt;
3139 tree tmp, atmp;
3140
3141 ptr = DECL_VALUE_EXPR (new_var);
3142 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
3143 ptr = TREE_OPERAND (ptr, 0);
3144 gcc_assert (DECL_P (ptr));
3145 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
3146
3147 /* void *tmp = __builtin_alloca */
3148 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3149 stmt = gimple_build_call (atmp, 1, x);
3150 tmp = create_tmp_var_raw (ptr_type_node, NULL);
3151 gimple_add_tmp_var (tmp);
3152 gimple_call_set_lhs (stmt, tmp);
3153
3154 gimple_seq_add_stmt (ilist, stmt);
3155
3156 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
3157 gimplify_assign (ptr, x, ilist);
3158 }
3159 }
3160 else if (is_reference (var))
3161 {
3162 /* For references that are being privatized for Fortran,
3163 allocate new backing storage for the new pointer
3164 variable. This allows us to avoid rewriting all the
3165 code that expects a pointer into code that expects
3166 a direct variable. */
3167 if (pass == 0)
3168 continue;
3169
3170 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
3171 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
3172 {
3173 x = build_receiver_ref (var, false, ctx);
3174 x = build_fold_addr_expr_loc (clause_loc, x);
3175 }
3176 else if (TREE_CONSTANT (x))
3177 {
3178 const char *name = NULL;
3179 if (DECL_NAME (var))
3180 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
3181
3182 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
3183 name);
3184 gimple_add_tmp_var (x);
3185 TREE_ADDRESSABLE (x) = 1;
3186 x = build_fold_addr_expr_loc (clause_loc, x);
3187 }
3188 else
3189 {
3190 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
3191 x = build_call_expr_loc (clause_loc, atmp, 1, x);
3192 }
3193
3194 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
3195 gimplify_assign (new_var, x, ilist);
3196
3197 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3198 }
3199 else if (c_kind == OMP_CLAUSE_REDUCTION
3200 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3201 {
3202 if (pass == 0)
3203 continue;
3204 }
3205 else if (pass != 0)
3206 continue;
3207
3208 switch (OMP_CLAUSE_CODE (c))
3209 {
3210 case OMP_CLAUSE_SHARED:
3211 /* Ignore shared directives in teams construct. */
3212 if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
3213 continue;
3214 /* Shared global vars are just accessed directly. */
3215 if (is_global_var (new_var))
3216 break;
3217 /* Set up the DECL_VALUE_EXPR for shared variables now. This
3218 needs to be delayed until after fixup_child_record_type so
3219 that we get the correct type during the dereference. */
3220 by_ref = use_pointer_for_field (var, ctx);
3221 x = build_receiver_ref (var, by_ref, ctx);
3222 SET_DECL_VALUE_EXPR (new_var, x);
3223 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3224
3225 /* ??? If VAR is not passed by reference, and the variable
3226 hasn't been initialized yet, then we'll get a warning for
3227 the store into the omp_data_s structure. Ideally, we'd be
3228 able to notice this and not store anything at all, but
3229 we're generating code too early. Suppress the warning. */
3230 if (!by_ref)
3231 TREE_NO_WARNING (var) = 1;
3232 break;
3233
3234 case OMP_CLAUSE_LASTPRIVATE:
3235 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3236 break;
3237 /* FALLTHRU */
3238
3239 case OMP_CLAUSE_PRIVATE:
3240 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
3241 x = build_outer_var_ref (var, ctx);
3242 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3243 {
3244 if (is_task_ctx (ctx))
3245 x = build_receiver_ref (var, false, ctx);
3246 else
3247 x = build_outer_var_ref (var, ctx);
3248 }
3249 else
3250 x = NULL;
3251 do_private:
3252 tree nx;
3253 nx = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
3254 if (is_simd)
3255 {
3256 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
3257 if ((TREE_ADDRESSABLE (new_var) || nx || y
3258 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
3259 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3260 idx, lane, ivar, lvar))
3261 {
3262 if (nx)
3263 x = lang_hooks.decls.omp_clause_default_ctor
3264 (c, unshare_expr (ivar), x);
3265 if (nx && x)
3266 gimplify_and_add (x, &llist[0]);
3267 if (y)
3268 {
3269 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
3270 if (y)
3271 {
3272 gimple_seq tseq = NULL;
3273
3274 dtor = y;
3275 gimplify_stmt (&dtor, &tseq);
3276 gimple_seq_add_seq (&llist[1], tseq);
3277 }
3278 }
3279 break;
3280 }
3281 }
3282 if (nx)
3283 gimplify_and_add (nx, ilist);
3284 /* FALLTHRU */
3285
3286 do_dtor:
3287 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
3288 if (x)
3289 {
3290 gimple_seq tseq = NULL;
3291
3292 dtor = x;
3293 gimplify_stmt (&dtor, &tseq);
3294 gimple_seq_add_seq (dlist, tseq);
3295 }
3296 break;
3297
3298 case OMP_CLAUSE_LINEAR:
3299 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
3300 goto do_firstprivate;
3301 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
3302 x = NULL;
3303 else
3304 x = build_outer_var_ref (var, ctx);
3305 goto do_private;
3306
3307 case OMP_CLAUSE_FIRSTPRIVATE:
3308 if (is_task_ctx (ctx))
3309 {
3310 if (is_reference (var) || is_variable_sized (var))
3311 goto do_dtor;
3312 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
3313 ctx))
3314 || use_pointer_for_field (var, NULL))
3315 {
3316 x = build_receiver_ref (var, false, ctx);
3317 SET_DECL_VALUE_EXPR (new_var, x);
3318 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
3319 goto do_dtor;
3320 }
3321 }
3322 do_firstprivate:
3323 x = build_outer_var_ref (var, ctx);
3324 if (is_simd)
3325 {
3326 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3327 && gimple_omp_for_combined_into_p (ctx->stmt))
3328 {
3329 tree stept = POINTER_TYPE_P (TREE_TYPE (x))
3330 ? sizetype : TREE_TYPE (x);
3331 tree t = fold_convert (stept,
3332 OMP_CLAUSE_LINEAR_STEP (c));
3333 tree c = find_omp_clause (clauses,
3334 OMP_CLAUSE__LOOPTEMP_);
3335 gcc_assert (c);
3336 tree l = OMP_CLAUSE_DECL (c);
3337 if (fd->collapse == 1)
3338 {
3339 tree n1 = fd->loop.n1;
3340 tree step = fd->loop.step;
3341 tree itype = TREE_TYPE (l);
3342 if (POINTER_TYPE_P (itype))
3343 itype = signed_type_for (itype);
3344 l = fold_build2 (MINUS_EXPR, itype, l, n1);
3345 if (TYPE_UNSIGNED (itype)
3346 && fd->loop.cond_code == GT_EXPR)
3347 l = fold_build2 (TRUNC_DIV_EXPR, itype,
3348 fold_build1 (NEGATE_EXPR,
3349 itype, l),
3350 fold_build1 (NEGATE_EXPR,
3351 itype, step));
3352 else
3353 l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
3354 }
3355 t = fold_build2 (MULT_EXPR, stept,
3356 fold_convert (stept, l), t);
3357 if (POINTER_TYPE_P (TREE_TYPE (x)))
3358 x = fold_build2 (POINTER_PLUS_EXPR,
3359 TREE_TYPE (x), x, t);
3360 else
3361 x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
3362 }
3363
3364 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
3365 || TREE_ADDRESSABLE (new_var))
3366 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3367 idx, lane, ivar, lvar))
3368 {
3369 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
3370 {
3371 tree iv = create_tmp_var (TREE_TYPE (new_var), NULL);
3372 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
3373 gimplify_and_add (x, ilist);
3374 gimple_stmt_iterator gsi
3375 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3376 gimple g
3377 = gimple_build_assign (unshare_expr (lvar), iv);
3378 gsi_insert_before_without_update (&gsi, g,
3379 GSI_SAME_STMT);
3380 tree stept = POINTER_TYPE_P (TREE_TYPE (x))
3381 ? sizetype : TREE_TYPE (x);
3382 tree t = fold_convert (stept,
3383 OMP_CLAUSE_LINEAR_STEP (c));
3384 enum tree_code code = PLUS_EXPR;
3385 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
3386 code = POINTER_PLUS_EXPR;
3387 g = gimple_build_assign_with_ops (code, iv, iv, t);
3388 gsi_insert_before_without_update (&gsi, g,
3389 GSI_SAME_STMT);
3390 break;
3391 }
3392 x = lang_hooks.decls.omp_clause_copy_ctor
3393 (c, unshare_expr (ivar), x);
3394 gimplify_and_add (x, &llist[0]);
3395 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3396 if (x)
3397 {
3398 gimple_seq tseq = NULL;
3399
3400 dtor = x;
3401 gimplify_stmt (&dtor, &tseq);
3402 gimple_seq_add_seq (&llist[1], tseq);
3403 }
3404 break;
3405 }
3406 }
3407 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
3408 gimplify_and_add (x, ilist);
3409 goto do_dtor;
3410
3411 case OMP_CLAUSE__LOOPTEMP_:
3412 gcc_assert (is_parallel_ctx (ctx));
3413 x = build_outer_var_ref (var, ctx);
3414 x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
3415 gimplify_and_add (x, ilist);
3416 break;
3417
3418 case OMP_CLAUSE_COPYIN:
3419 by_ref = use_pointer_for_field (var, NULL);
3420 x = build_receiver_ref (var, by_ref, ctx);
3421 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
3422 append_to_statement_list (x, &copyin_seq);
3423 copyin_by_ref |= by_ref;
3424 break;
3425
3426 case OMP_CLAUSE_REDUCTION:
3427 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3428 {
3429 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3430 gimple tseq;
3431 x = build_outer_var_ref (var, ctx);
3432
3433 if (is_reference (var)
3434 && !useless_type_conversion_p (TREE_TYPE (placeholder),
3435 TREE_TYPE (x)))
3436 x = build_fold_addr_expr_loc (clause_loc, x);
3437 SET_DECL_VALUE_EXPR (placeholder, x);
3438 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
3439 tree new_vard = new_var;
3440 if (is_reference (var))
3441 {
3442 gcc_assert (TREE_CODE (new_var) == MEM_REF);
3443 new_vard = TREE_OPERAND (new_var, 0);
3444 gcc_assert (DECL_P (new_vard));
3445 }
3446 if (is_simd
3447 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3448 idx, lane, ivar, lvar))
3449 {
3450 if (new_vard == new_var)
3451 {
3452 gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
3453 SET_DECL_VALUE_EXPR (new_var, ivar);
3454 }
3455 else
3456 {
3457 SET_DECL_VALUE_EXPR (new_vard,
3458 build_fold_addr_expr (ivar));
3459 DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
3460 }
3461 x = lang_hooks.decls.omp_clause_default_ctor
3462 (c, unshare_expr (ivar),
3463 build_outer_var_ref (var, ctx));
3464 if (x)
3465 gimplify_and_add (x, &llist[0]);
3466 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3467 {
3468 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3469 lower_omp (&tseq, ctx);
3470 gimple_seq_add_seq (&llist[0], tseq);
3471 }
3472 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3473 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3474 lower_omp (&tseq, ctx);
3475 gimple_seq_add_seq (&llist[1], tseq);
3476 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3477 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3478 if (new_vard == new_var)
3479 SET_DECL_VALUE_EXPR (new_var, lvar);
3480 else
3481 SET_DECL_VALUE_EXPR (new_vard,
3482 build_fold_addr_expr (lvar));
3483 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
3484 if (x)
3485 {
3486 tseq = NULL;
3487 dtor = x;
3488 gimplify_stmt (&dtor, &tseq);
3489 gimple_seq_add_seq (&llist[1], tseq);
3490 }
3491 break;
3492 }
3493 x = lang_hooks.decls.omp_clause_default_ctor
3494 (c, new_var, unshare_expr (x));
3495 if (x)
3496 gimplify_and_add (x, ilist);
3497 if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
3498 {
3499 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
3500 lower_omp (&tseq, ctx);
3501 gimple_seq_add_seq (ilist, tseq);
3502 }
3503 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
3504 if (is_simd)
3505 {
3506 tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
3507 lower_omp (&tseq, ctx);
3508 gimple_seq_add_seq (dlist, tseq);
3509 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3510 }
3511 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
3512 goto do_dtor;
3513 }
3514 else
3515 {
3516 x = omp_reduction_init (c, TREE_TYPE (new_var));
3517 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
3518 if (is_simd
3519 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
3520 idx, lane, ivar, lvar))
3521 {
3522 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
3523 tree ref = build_outer_var_ref (var, ctx);
3524
3525 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
3526
3527 /* reduction(-:var) sums up the partial results, so it
3528 acts identically to reduction(+:var). */
3529 if (code == MINUS_EXPR)
3530 code = PLUS_EXPR;
3531
3532 x = build2 (code, TREE_TYPE (ref), ref, ivar);
3533 ref = build_outer_var_ref (var, ctx);
3534 gimplify_assign (ref, x, &llist[1]);
3535 }
3536 else
3537 {
3538 gimplify_assign (new_var, x, ilist);
3539 if (is_simd)
3540 gimplify_assign (build_outer_var_ref (var, ctx),
3541 new_var, dlist);
3542 }
3543 }
3544 break;
3545
3546 default:
3547 gcc_unreachable ();
3548 }
3549 }
3550 }
3551
3552 if (lane)
3553 {
3554 tree uid = create_tmp_var (ptr_type_node, "simduid");
3555 /* Don't want uninit warnings on simduid; it is always uninitialized,
3556 but we use it only for its DECL_UID, never for its value. */
3557 TREE_NO_WARNING (uid) = 1;
3558 gimple g
3559 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
3560 gimple_call_set_lhs (g, lane);
3561 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
3562 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
3563 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
3564 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
3565 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3566 gimple_omp_for_set_clauses (ctx->stmt, c);
3567 g = gimple_build_assign_with_ops (INTEGER_CST, lane,
3568 build_int_cst (unsigned_type_node, 0),
3569 NULL_TREE);
3570 gimple_seq_add_stmt (ilist, g);
3571 for (int i = 0; i < 2; i++)
3572 if (llist[i])
3573 {
3574 tree vf = create_tmp_var (unsigned_type_node, NULL);
3575 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
3576 gimple_call_set_lhs (g, vf);
3577 gimple_seq *seq = i == 0 ? ilist : dlist;
3578 gimple_seq_add_stmt (seq, g);
3579 tree t = build_int_cst (unsigned_type_node, 0);
3580 g = gimple_build_assign_with_ops (INTEGER_CST, idx, t, NULL_TREE);
3581 gimple_seq_add_stmt (seq, g);
3582 tree body = create_artificial_label (UNKNOWN_LOCATION);
3583 tree header = create_artificial_label (UNKNOWN_LOCATION);
3584 tree end = create_artificial_label (UNKNOWN_LOCATION);
3585 gimple_seq_add_stmt (seq, gimple_build_goto (header));
3586 gimple_seq_add_stmt (seq, gimple_build_label (body));
3587 gimple_seq_add_seq (seq, llist[i]);
3588 t = build_int_cst (unsigned_type_node, 1);
3589 g = gimple_build_assign_with_ops (PLUS_EXPR, idx, idx, t);
3590 gimple_seq_add_stmt (seq, g);
3591 gimple_seq_add_stmt (seq, gimple_build_label (header));
3592 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
3593 gimple_seq_add_stmt (seq, g);
3594 gimple_seq_add_stmt (seq, gimple_build_label (end));
3595 }
3596 }
3597
3598 /* The copyin sequence is not to be executed by the main thread, since
3599 that would result in self-copies. A self-copy may not be observable
3600 for scalars, but it certainly is for C++ operator=. */
3601 if (copyin_seq)
3602 {
3603 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
3604 0);
3605 x = build2 (NE_EXPR, boolean_type_node, x,
3606 build_int_cst (TREE_TYPE (x), 0));
3607 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
3608 gimplify_and_add (x, ilist);
3609 }
3610
3611 /* If any copyin variable is passed by reference, we must ensure the
3612 master thread doesn't modify it before it is copied over in all
3613 threads. Similarly for variables in both firstprivate and
3614 lastprivate clauses we need to ensure the lastprivate copying
3615 happens after firstprivate copying in all threads. And similarly
3616 for UDRs if the initializer expression refers to omp_orig. */
3617 if (copyin_by_ref || lastprivate_firstprivate || reduction_omp_orig_ref)
3618 {
3619 /* Don't add any barrier for #pragma omp simd or
3620 #pragma omp distribute. */
3621 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
3622 || gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_KIND_FOR)
3623 gimple_seq_add_stmt (ilist, build_omp_barrier (NULL_TREE));
3624 }
3625
3626 /* If max_vf is non-zero, then we can use only a vectorization factor
3627 up to the max_vf we chose. So stick it into the safelen clause. */
3628 if (max_vf)
3629 {
3630 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
3631 OMP_CLAUSE_SAFELEN);
3632 if (c == NULL_TREE
3633 || compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
3634 max_vf) == 1)
3635 {
3636 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
3637 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
3638 max_vf);
3639 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
3640 gimple_omp_for_set_clauses (ctx->stmt, c);
3641 }
3642 }
3643 }
3644
3645
3646 /* Generate code to implement the LASTPRIVATE clauses. This is used for
3647 both parallel and workshare constructs. PREDICATE may be NULL if it's
3648 always true. */
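/* Sketch of the emitted shape (variable names invented): for

     #pragma omp for lastprivate (x)

   the code appended to STMT_LIST is roughly

     if (<this thread ran the sequentially last iteration>)   <- PREDICATE
       x_orig = x_priv;

   so only the thread that executed the last iteration stores its private
   value back into the original variable.  */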
3649
3650 static void
3651 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
3652 omp_context *ctx)
3653 {
3654 tree x, c, label = NULL, orig_clauses = clauses;
3655 bool par_clauses = false;
3656 tree simduid = NULL, lastlane = NULL;
3657
3658 /* Early exit if there are no lastprivate or linear clauses. */
3659 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
3660 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
3661 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
3662 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
3663 break;
3664 if (clauses == NULL)
3665 {
3666 /* If this was a workshare clause, see if it had been combined
3667 with its parallel. In that case, look for the clauses on the
3668 parallel statement itself. */
3669 if (is_parallel_ctx (ctx))
3670 return;
3671
3672 ctx = ctx->outer;
3673 if (ctx == NULL || !is_parallel_ctx (ctx))
3674 return;
3675
3676 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
3677 OMP_CLAUSE_LASTPRIVATE);
3678 if (clauses == NULL)
3679 return;
3680 par_clauses = true;
3681 }
3682
3683 if (predicate)
3684 {
3685 gimple stmt;
3686 tree label_true, arm1, arm2;
3687
3688 label = create_artificial_label (UNKNOWN_LOCATION);
3689 label_true = create_artificial_label (UNKNOWN_LOCATION);
3690 arm1 = TREE_OPERAND (predicate, 0);
3691 arm2 = TREE_OPERAND (predicate, 1);
3692 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
3693 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
3694 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
3695 label_true, label);
3696 gimple_seq_add_stmt (stmt_list, stmt);
3697 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
3698 }
3699
3700 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3701 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_KIND_SIMD)
3702 {
3703 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
3704 if (simduid)
3705 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
3706 }
3707
3708 for (c = clauses; c ;)
3709 {
3710 tree var, new_var;
3711 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3712
3713 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
3714 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
3715 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
3716 {
3717 var = OMP_CLAUSE_DECL (c);
3718 new_var = lookup_decl (var, ctx);
3719
3720 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
3721 {
3722 tree val = DECL_VALUE_EXPR (new_var);
3723 if (TREE_CODE (val) == ARRAY_REF
3724 && VAR_P (TREE_OPERAND (val, 0))
3725 && lookup_attribute ("omp simd array",
3726 DECL_ATTRIBUTES (TREE_OPERAND (val,
3727 0))))
3728 {
3729 if (lastlane == NULL)
3730 {
3731 lastlane = create_tmp_var (unsigned_type_node, NULL);
3732 gimple g
3733 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
3734 2, simduid,
3735 TREE_OPERAND (val, 1));
3736 gimple_call_set_lhs (g, lastlane);
3737 gimple_seq_add_stmt (stmt_list, g);
3738 }
3739 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
3740 TREE_OPERAND (val, 0), lastlane,
3741 NULL_TREE, NULL_TREE);
3742 }
3743 }
3744
3745 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
3746 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
3747 {
3748 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
3749 gimple_seq_add_seq (stmt_list,
3750 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
3751 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
3752 }
3753
3754 x = build_outer_var_ref (var, ctx);
3755 if (is_reference (var))
3756 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3757 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
3758 gimplify_and_add (x, stmt_list);
3759 }
3760 c = OMP_CLAUSE_CHAIN (c);
3761 if (c == NULL && !par_clauses)
3762 {
3763 /* If this was a workshare clause, see if it had been combined
3764 with its parallel. In that case, continue looking for the
3765 clauses also on the parallel statement itself. */
3766 if (is_parallel_ctx (ctx))
3767 break;
3768
3769 ctx = ctx->outer;
3770 if (ctx == NULL || !is_parallel_ctx (ctx))
3771 break;
3772
3773 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
3774 OMP_CLAUSE_LASTPRIVATE);
3775 par_clauses = true;
3776 }
3777 }
3778
3779 if (label)
3780 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
3781 }
3782
3783
3784 /* Generate code to implement the REDUCTION clauses. */
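/* Illustration (not part of the original file): for
#pragma omp parallel for reduction (+:s), each thread's partial
result s' must be merged into the shared S. With exactly one scalar
reduction clause this is emitted as a single atomic update, roughly

#pragma omp atomic
s = s + s';

With two or more reductions, or an array/UDR reduction, the merge
statements are instead serialized between GOMP_atomic_start () and
GOMP_atomic_end () calls. */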
3785
3786 static void
3787 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
3788 {
3789 gimple_seq sub_seq = NULL;
3790 gimple stmt;
3791 tree x, c;
3792 int count = 0;
3793
3794 /* SIMD reductions are handled in lower_rec_input_clauses. */
3795 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3796 && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_KIND_SIMD)
3797 return;
3798
3799 /* First see if there is exactly one reduction clause. Use an OMP_ATOMIC
3800 update in that case; otherwise use a lock. */
3801 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
3802 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
3803 {
3804 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3805 {
3806 /* Never use OMP_ATOMIC for array reductions or UDRs. */
3807 count = -1;
3808 break;
3809 }
3810 count++;
3811 }
3812
3813 if (count == 0)
3814 return;
3815
3816 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3817 {
3818 tree var, ref, new_var;
3819 enum tree_code code;
3820 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3821
3822 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
3823 continue;
3824
3825 var = OMP_CLAUSE_DECL (c);
3826 new_var = lookup_decl (var, ctx);
3827 if (is_reference (var))
3828 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3829 ref = build_outer_var_ref (var, ctx);
3830 code = OMP_CLAUSE_REDUCTION_CODE (c);
3831
3832 /* reduction(-:var) sums up the partial results, so it acts
3833 identically to reduction(+:var). */
3834 if (code == MINUS_EXPR)
3835 code = PLUS_EXPR;
3836
3837 if (count == 1)
3838 {
3839 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
3840
3841 addr = save_expr (addr);
3842 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
3843 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
3844 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
3845 gimplify_and_add (x, stmt_seqp);
3846 return;
3847 }
3848
3849 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3850 {
3851 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3852
3853 if (is_reference (var)
3854 && !useless_type_conversion_p (TREE_TYPE (placeholder),
3855 TREE_TYPE (ref)))
3856 ref = build_fold_addr_expr_loc (clause_loc, ref);
3857 SET_DECL_VALUE_EXPR (placeholder, ref);
3858 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
3859 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
3860 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
3861 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3862 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
3863 }
3864 else
3865 {
3866 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3867 ref = build_outer_var_ref (var, ctx);
3868 gimplify_assign (ref, x, &sub_seq);
3869 }
3870 }
3871
3872 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
3873 0);
3874 gimple_seq_add_stmt (stmt_seqp, stmt);
3875
3876 gimple_seq_add_seq (stmt_seqp, sub_seq);
3877
3878 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
3879 0);
3880 gimple_seq_add_stmt (stmt_seqp, stmt);
3881 }
3882
3883
3884 /* Generate code to implement the COPYPRIVATE clauses. */
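/* Illustrative sketch (record/field names such as .omp_copy_s are made
up for exposition): for #pragma omp single copyprivate (x), the thread
that executed the single body broadcasts its value through a shared
record. SLIST gets roughly

.omp_copy_s.x = x; (or = &x when passed by reference)

and RLIST, run by the other threads, gets

x = .omp_copy_s.x; (or a copy through the pointer) */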
3885
3886 static void
3887 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
3888 omp_context *ctx)
3889 {
3890 tree c;
3891
3892 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3893 {
3894 tree var, new_var, ref, x;
3895 bool by_ref;
3896 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3897
3898 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
3899 continue;
3900
3901 var = OMP_CLAUSE_DECL (c);
3902 by_ref = use_pointer_for_field (var, NULL);
3903
3904 ref = build_sender_ref (var, ctx);
3905 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
3906 if (by_ref)
3907 {
3908 x = build_fold_addr_expr_loc (clause_loc, new_var);
3909 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
3910 }
3911 gimplify_assign (ref, x, slist);
3912
3913 ref = build_receiver_ref (var, false, ctx);
3914 if (by_ref)
3915 {
3916 ref = fold_convert_loc (clause_loc,
3917 build_pointer_type (TREE_TYPE (new_var)),
3918 ref);
3919 ref = build_fold_indirect_ref_loc (clause_loc, ref);
3920 }
3921 if (is_reference (var))
3922 {
3923 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
3924 ref = build_simple_mem_ref_loc (clause_loc, ref);
3925 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3926 }
3927 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
3928 gimplify_and_add (x, rlist);
3929 }
3930 }
3931
3932
3933 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
3934 and REDUCTION from the sender (aka parent) side. */
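/* Illustrative sketch (record/field names are just for exposition):
for #pragma omp parallel firstprivate (a) lastprivate (b) the parent
emits, around the library call that launches the region,

.omp_data_o.a = a; (ILIST, do_in: send the initial value)
... launch the child function ...
b = .omp_data_o.b; (OLIST, do_out: receive the final value)

By-reference fields send the address instead of the value. */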
3935
3936 static void
3937 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
3938 omp_context *ctx)
3939 {
3940 tree c;
3941
3942 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3943 {
3944 tree val, ref, x, var;
3945 bool by_ref, do_in = false, do_out = false;
3946 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3947
3948 switch (OMP_CLAUSE_CODE (c))
3949 {
3950 case OMP_CLAUSE_PRIVATE:
3951 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3952 break;
3953 continue;
3954 case OMP_CLAUSE_FIRSTPRIVATE:
3955 case OMP_CLAUSE_COPYIN:
3956 case OMP_CLAUSE_LASTPRIVATE:
3957 case OMP_CLAUSE_REDUCTION:
3958 case OMP_CLAUSE__LOOPTEMP_:
3959 break;
3960 default:
3961 continue;
3962 }
3963
3964 val = OMP_CLAUSE_DECL (c);
3965 var = lookup_decl_in_outer_ctx (val, ctx);
3966
3967 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
3968 && is_global_var (var))
3969 continue;
3970 if (is_variable_sized (val))
3971 continue;
3972 by_ref = use_pointer_for_field (val, NULL);
3973
3974 switch (OMP_CLAUSE_CODE (c))
3975 {
3976 case OMP_CLAUSE_PRIVATE:
3977 case OMP_CLAUSE_FIRSTPRIVATE:
3978 case OMP_CLAUSE_COPYIN:
3979 case OMP_CLAUSE__LOOPTEMP_:
3980 do_in = true;
3981 break;
3982
3983 case OMP_CLAUSE_LASTPRIVATE:
3984 if (by_ref || is_reference (val))
3985 {
3986 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3987 continue;
3988 do_in = true;
3989 }
3990 else
3991 {
3992 do_out = true;
3993 if (lang_hooks.decls.omp_private_outer_ref (val))
3994 do_in = true;
3995 }
3996 break;
3997
3998 case OMP_CLAUSE_REDUCTION:
3999 do_in = true;
4000 do_out = !(by_ref || is_reference (val));
4001 break;
4002
4003 default:
4004 gcc_unreachable ();
4005 }
4006
4007 if (do_in)
4008 {
4009 ref = build_sender_ref (val, ctx);
4010 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
4011 gimplify_assign (ref, x, ilist);
4012 if (is_task_ctx (ctx))
4013 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
4014 }
4015
4016 if (do_out)
4017 {
4018 ref = build_sender_ref (val, ctx);
4019 gimplify_assign (var, ref, olist);
4020 }
4021 }
4022 }
4023
4024 /* Generate code to implement SHARED from the sender (aka parent)
4025 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
4026 list things that got automatically shared. */
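/* Illustrative sketch (field names are just for exposition): for an
automatically shared variable X that received a field in the record,
the parent emits

.omp_data_o.x = x; (or = &x when use_pointer_for_field)

in ILIST and, when X is not passed by reference and not read-only,
copies the possibly modified value back in OLIST:

x = .omp_data_o.x; */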
4027
4028 static void
4029 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
4030 {
4031 tree var, ovar, nvar, f, x, record_type;
4032
4033 if (ctx->record_type == NULL)
4034 return;
4035
4036 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
4037 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
4038 {
4039 ovar = DECL_ABSTRACT_ORIGIN (f);
4040 nvar = maybe_lookup_decl (ovar, ctx);
4041 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
4042 continue;
4043
4044 /* If CTX is a nested parallel directive, find the immediately
4045 enclosing parallel or workshare construct that contains a
4046 mapping for OVAR. */
4047 var = lookup_decl_in_outer_ctx (ovar, ctx);
4048
4049 if (use_pointer_for_field (ovar, ctx))
4050 {
4051 x = build_sender_ref (ovar, ctx);
4052 var = build_fold_addr_expr (var);
4053 gimplify_assign (x, var, ilist);
4054 }
4055 else
4056 {
4057 x = build_sender_ref (ovar, ctx);
4058 gimplify_assign (x, var, ilist);
4059
4060 if (!TREE_READONLY (var)
4061 /* We don't need to receive a new reference to a result
4062 or parm decl. In fact we may not store to it as we will
4063 invalidate any pending RSO and generate wrong gimple
4064 during inlining. */
4065 && !((TREE_CODE (var) == RESULT_DECL
4066 || TREE_CODE (var) == PARM_DECL)
4067 && DECL_BY_REFERENCE (var)))
4068 {
4069 x = build_sender_ref (ovar, ctx);
4070 gimplify_assign (var, x, olist);
4071 }
4072 }
4073 }
4074 }
4075
4076
4077 /* A convenience function to build an empty GIMPLE_COND with just the
4078 condition. */
4079
4080 static gimple
4081 gimple_build_cond_empty (tree cond)
4082 {
4083 enum tree_code pred_code;
4084 tree lhs, rhs;
4085
4086 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
4087 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
4088 }
4089
4090
4091 /* Build the function calls to GOMP_parallel_start etc to actually
4092 generate the parallel operation. REGION is the parallel region
4093 being expanded. BB is the block where to insert the code. WS_ARGS
4094 will be set if this is a call to a combined parallel+workshare
4095 construct, it contains the list of additional arguments needed by
4096 the workshare construct. */
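/* As an illustration (a sketch, not a definitive statement of the
libgomp ABI): for a plain parallel region this boils down to a single
call along the lines of

__builtin_GOMP_parallel (child_fn, &.omp_data_o, num_threads, flags);

while a combined parallel+workshare region calls a specialized entry
point instead (one of the GOMP_parallel_loop_* or
GOMP_parallel_sections builtins selected below), with WS_ARGS spliced
in before FLAGS. */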
4097
4098 static void
4099 expand_parallel_call (struct omp_region *region, basic_block bb,
4100 gimple entry_stmt, vec<tree, va_gc> *ws_args)
4101 {
4102 tree t, t1, t2, val, cond, c, clauses, flags;
4103 gimple_stmt_iterator gsi;
4104 gimple stmt;
4105 enum built_in_function start_ix;
4106 int start_ix2;
4107 location_t clause_loc;
4108 vec<tree, va_gc> *args;
4109
4110 clauses = gimple_omp_parallel_clauses (entry_stmt);
4111
4112 /* Determine what flavor of GOMP_parallel we will be
4113 emitting. */
4114 start_ix = BUILT_IN_GOMP_PARALLEL;
4115 if (is_combined_parallel (region))
4116 {
4117 switch (region->inner->type)
4118 {
4119 case GIMPLE_OMP_FOR:
4120 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4121 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC
4122 + (region->inner->sched_kind
4123 == OMP_CLAUSE_SCHEDULE_RUNTIME
4124 ? 3 : region->inner->sched_kind));
4125 start_ix = (enum built_in_function)start_ix2;
4126 break;
4127 case GIMPLE_OMP_SECTIONS:
4128 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
4129 break;
4130 default:
4131 gcc_unreachable ();
4132 }
4133 }
4134
4135 /* By default, the value of NUM_THREADS is zero (selected at run time)
4136 and there is no conditional. */
4137 cond = NULL_TREE;
4138 val = build_int_cst (unsigned_type_node, 0);
4139 flags = build_int_cst (unsigned_type_node, 0);
4140
4141 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4142 if (c)
4143 cond = OMP_CLAUSE_IF_EXPR (c);
4144
4145 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
4146 if (c)
4147 {
4148 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
4149 clause_loc = OMP_CLAUSE_LOCATION (c);
4150 }
4151 else
4152 clause_loc = gimple_location (entry_stmt);
4153
4154 c = find_omp_clause (clauses, OMP_CLAUSE_PROC_BIND);
4155 if (c)
4156 flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));
4157
4158 /* Ensure 'val' is of the correct type. */
4159 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
4160
4161 /* If we found the clause 'if (cond)', build either
4162 (cond != 0) or (cond ? val : 1u). */
4163 if (cond)
4164 {
4165 gimple_stmt_iterator gsi;
4166
4167 cond = gimple_boolify (cond);
4168
4169 if (integer_zerop (val))
4170 val = fold_build2_loc (clause_loc,
4171 EQ_EXPR, unsigned_type_node, cond,
4172 build_int_cst (TREE_TYPE (cond), 0));
4173 else
4174 {
4175 basic_block cond_bb, then_bb, else_bb;
4176 edge e, e_then, e_else;
4177 tree tmp_then, tmp_else, tmp_join, tmp_var;
4178
4179 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
4180 if (gimple_in_ssa_p (cfun))
4181 {
4182 tmp_then = make_ssa_name (tmp_var, NULL);
4183 tmp_else = make_ssa_name (tmp_var, NULL);
4184 tmp_join = make_ssa_name (tmp_var, NULL);
4185 }
4186 else
4187 {
4188 tmp_then = tmp_var;
4189 tmp_else = tmp_var;
4190 tmp_join = tmp_var;
4191 }
4192
4193 e = split_block (bb, NULL);
4194 cond_bb = e->src;
4195 bb = e->dest;
4196 remove_edge (e);
4197
4198 then_bb = create_empty_bb (cond_bb);
4199 else_bb = create_empty_bb (then_bb);
4200 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
4201 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
4202
4203 stmt = gimple_build_cond_empty (cond);
4204 gsi = gsi_start_bb (cond_bb);
4205 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4206
4207 gsi = gsi_start_bb (then_bb);
4208 stmt = gimple_build_assign (tmp_then, val);
4209 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4210
4211 gsi = gsi_start_bb (else_bb);
4212 stmt = gimple_build_assign
4213 (tmp_else, build_int_cst (unsigned_type_node, 1));
4214 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4215
4216 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
4217 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
4218 if (current_loops)
4219 {
4220 add_bb_to_loop (then_bb, cond_bb->loop_father);
4221 add_bb_to_loop (else_bb, cond_bb->loop_father);
4222 }
4223 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
4224 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
4225
4226 if (gimple_in_ssa_p (cfun))
4227 {
4228 gimple phi = create_phi_node (tmp_join, bb);
4229 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
4230 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
4231 }
4232
4233 val = tmp_join;
4234 }
4235
4236 gsi = gsi_start_bb (bb);
4237 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
4238 false, GSI_CONTINUE_LINKING);
4239 }
4240
4241 gsi = gsi_last_bb (bb);
4242 t = gimple_omp_parallel_data_arg (entry_stmt);
4243 if (t == NULL)
4244 t1 = null_pointer_node;
4245 else
4246 t1 = build_fold_addr_expr (t);
4247 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
4248
4249 vec_alloc (args, 4 + vec_safe_length (ws_args));
4250 args->quick_push (t2);
4251 args->quick_push (t1);
4252 args->quick_push (val);
4253 if (ws_args)
4254 args->splice (*ws_args);
4255 args->quick_push (flags);
4256
4257 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
4258 builtin_decl_explicit (start_ix), args);
4259
4260 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4261 false, GSI_CONTINUE_LINKING);
4262 }
4263
4264
4265 /* Build the function call to GOMP_task to actually
4266 generate the task operation. BB is the block where to insert the code. */
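/* Illustrative sketch of the emitted call (argument order follows the
code below):

GOMP_task (child_fn, &.omp_data_o, copy_fn, arg_size, arg_align,
if_cond, flags, depend);

where FLAGS is a bitmask: 1 for untied, 4 for mergeable, 8 when a
depend clause is present, plus 2 when the final clause expression
evaluates to true. */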
4267
4268 static void
4269 expand_task_call (basic_block bb, gimple entry_stmt)
4270 {
4271 tree t, t1, t2, t3, flags, cond, c, c2, clauses, depend;
4272 gimple_stmt_iterator gsi;
4273 location_t loc = gimple_location (entry_stmt);
4274
4275 clauses = gimple_omp_task_clauses (entry_stmt);
4276
4277 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
4278 if (c)
4279 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
4280 else
4281 cond = boolean_true_node;
4282
4283 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
4284 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
4285 depend = find_omp_clause (clauses, OMP_CLAUSE_DEPEND);
4286 flags = build_int_cst (unsigned_type_node,
4287 (c ? 1 : 0) + (c2 ? 4 : 0) + (depend ? 8 : 0));
4288
4289 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
4290 if (c)
4291 {
4292 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
4293 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
4294 build_int_cst (unsigned_type_node, 2),
4295 build_int_cst (unsigned_type_node, 0));
4296 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
4297 }
4298 if (depend)
4299 depend = OMP_CLAUSE_DECL (depend);
4300 else
4301 depend = build_int_cst (ptr_type_node, 0);
4302
4303 gsi = gsi_last_bb (bb);
4304 t = gimple_omp_task_data_arg (entry_stmt);
4305 if (t == NULL)
4306 t2 = null_pointer_node;
4307 else
4308 t2 = build_fold_addr_expr_loc (loc, t);
4309 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
4310 t = gimple_omp_task_copy_fn (entry_stmt);
4311 if (t == NULL)
4312 t3 = null_pointer_node;
4313 else
4314 t3 = build_fold_addr_expr_loc (loc, t);
4315
4316 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
4317 8, t1, t2, t3,
4318 gimple_omp_task_arg_size (entry_stmt),
4319 gimple_omp_task_arg_align (entry_stmt), cond, flags,
4320 depend);
4321
4322 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4323 false, GSI_CONTINUE_LINKING);
4324 }
4325
4326
4327 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
4328 catch handler and return it. This prevents programs from violating the
4329 structured block semantics with throws. */
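/* Conceptually (a sketch, assuming the usual C++ hook of calling
terminate):

try { BODY } catch (...) { terminate (); }

where the handler is whatever lang_hooks.eh_protect_cleanup_actions
returns, or __builtin_trap () when the language provides no hook. */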
4330
4331 static gimple_seq
4332 maybe_catch_exception (gimple_seq body)
4333 {
4334 gimple g;
4335 tree decl;
4336
4337 if (!flag_exceptions)
4338 return body;
4339
4340 if (lang_hooks.eh_protect_cleanup_actions != NULL)
4341 decl = lang_hooks.eh_protect_cleanup_actions ();
4342 else
4343 decl = builtin_decl_explicit (BUILT_IN_TRAP);
4344
4345 g = gimple_build_eh_must_not_throw (decl);
4346 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
4347 GIMPLE_TRY_CATCH);
4348
4349 return gimple_seq_alloc_with_stmt (g);
4350 }
4351
4352 /* Chain all the DECLs in LIST by their TREE_CHAIN fields. */
4353
4354 static tree
4355 vec2chain (vec<tree, va_gc> *v)
4356 {
4357 tree chain = NULL_TREE, t;
4358 unsigned ix;
4359
4360 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
4361 {
4362 DECL_CHAIN (t) = chain;
4363 chain = t;
4364 }
4365
4366 return chain;
4367 }
4368
4369
4370 /* Remove barriers in REGION->EXIT's block. Note that this is only
4371 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
4372 is an implicit barrier, any barrier that a workshare inside the
4373 GIMPLE_OMP_PARALLEL left at the end of the GIMPLE_OMP_PARALLEL region
4374 can now be removed. */
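/* For example (illustrative only):

#pragma omp parallel
{
#pragma omp for (implicit barrier after the loop)
for (i = 0; i < n; i++)
...
} (implicit barrier at end of the parallel)

The loop's trailing barrier is immediately followed by the parallel's
own barrier, so its GIMPLE_OMP_RETURN can be marked nowait, unless
queued tasks might still need addressable locals, which is what the
check below guards against. */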
4375
4376 static void
4377 remove_exit_barrier (struct omp_region *region)
4378 {
4379 gimple_stmt_iterator gsi;
4380 basic_block exit_bb;
4381 edge_iterator ei;
4382 edge e;
4383 gimple stmt;
4384 int any_addressable_vars = -1;
4385
4386 exit_bb = region->exit;
4387
4388 /* If the parallel region doesn't return, we don't have REGION->EXIT
4389 block at all. */
4390 if (! exit_bb)
4391 return;
4392
4393 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
4394 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
4395 statements that can appear in between are extremely limited -- no
4396 memory operations at all. Here, we allow nothing at all, so the
4397 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
4398 gsi = gsi_last_bb (exit_bb);
4399 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
4400 gsi_prev (&gsi);
4401 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
4402 return;
4403
4404 FOR_EACH_EDGE (e, ei, exit_bb->preds)
4405 {
4406 gsi = gsi_last_bb (e->src);
4407 if (gsi_end_p (gsi))
4408 continue;
4409 stmt = gsi_stmt (gsi);
4410 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
4411 && !gimple_omp_return_nowait_p (stmt))
4412 {
4413 /* OpenMP 3.0 tasks unfortunately prevent this optimization
4414 in many cases. If there could be tasks queued, the barrier
4415 might be needed to let the tasks run before some local
4416 variable of the parallel that the task uses as shared
4417 runs out of scope. The task can be spawned either
4418 from within the current function (this would be easy to check)
4419 or from some function it calls and gets passed an address
4420 of such a variable. */
4421 if (any_addressable_vars < 0)
4422 {
4423 gimple parallel_stmt = last_stmt (region->entry);
4424 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
4425 tree local_decls, block, decl;
4426 unsigned ix;
4427
4428 any_addressable_vars = 0;
4429 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
4430 if (TREE_ADDRESSABLE (decl))
4431 {
4432 any_addressable_vars = 1;
4433 break;
4434 }
4435 for (block = gimple_block (stmt);
4436 !any_addressable_vars
4437 && block
4438 && TREE_CODE (block) == BLOCK;
4439 block = BLOCK_SUPERCONTEXT (block))
4440 {
4441 for (local_decls = BLOCK_VARS (block);
4442 local_decls;
4443 local_decls = DECL_CHAIN (local_decls))
4444 if (TREE_ADDRESSABLE (local_decls))
4445 {
4446 any_addressable_vars = 1;
4447 break;
4448 }
4449 if (block == gimple_block (parallel_stmt))
4450 break;
4451 }
4452 }
4453 if (!any_addressable_vars)
4454 gimple_omp_return_set_nowait (stmt);
4455 }
4456 }
4457 }
4458
4459 static void
4460 remove_exit_barriers (struct omp_region *region)
4461 {
4462 if (region->type == GIMPLE_OMP_PARALLEL)
4463 remove_exit_barrier (region);
4464
4465 if (region->inner)
4466 {
4467 region = region->inner;
4468 remove_exit_barriers (region);
4469 while (region->next)
4470 {
4471 region = region->next;
4472 remove_exit_barriers (region);
4473 }
4474 }
4475 }
4476
4477 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
4478 calls. These can't be declared as const functions, but
4479 within one parallel body they are constant, so they can be
4480 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
4481 which are declared const. Similarly for a task body, except
4482 that in an untied task omp_get_thread_num () can change at any task
4483 scheduling point. */
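/* For example (illustrative only), inside one parallel body

n = omp_get_num_threads (); (external, not const)

is redirected to

n = __builtin_omp_get_num_threads ();

which is declared const, so repeated calls can be CSEd. */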
4484
4485 static void
4486 optimize_omp_library_calls (gimple entry_stmt)
4487 {
4488 basic_block bb;
4489 gimple_stmt_iterator gsi;
4490 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4491 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
4492 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4493 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
4494 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
4495 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
4496 OMP_CLAUSE_UNTIED) != NULL);
4497
4498 FOR_EACH_BB (bb)
4499 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4500 {
4501 gimple call = gsi_stmt (gsi);
4502 tree decl;
4503
4504 if (is_gimple_call (call)
4505 && (decl = gimple_call_fndecl (call))
4506 && DECL_EXTERNAL (decl)
4507 && TREE_PUBLIC (decl)
4508 && DECL_INITIAL (decl) == NULL)
4509 {
4510 tree built_in;
4511
4512 if (DECL_NAME (decl) == thr_num_id)
4513 {
4514 /* In #pragma omp task untied omp_get_thread_num () can change
4515 during the execution of the task region. */
4516 if (untied_task)
4517 continue;
4518 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
4519 }
4520 else if (DECL_NAME (decl) == num_thr_id)
4521 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
4522 else
4523 continue;
4524
4525 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
4526 || gimple_call_num_args (call) != 0)
4527 continue;
4528
4529 if (flag_exceptions && !TREE_NOTHROW (decl))
4530 continue;
4531
4532 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
4533 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
4534 TREE_TYPE (TREE_TYPE (built_in))))
4535 continue;
4536
4537 gimple_call_set_fndecl (call, built_in);
4538 }
4539 }
4540 }
4541
4542 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
4543 regimplified. */
4544
4545 static tree
4546 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
4547 {
4548 tree t = *tp;
4549
4550 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
4551 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
4552 return t;
4553
4554 if (TREE_CODE (t) == ADDR_EXPR)
4555 recompute_tree_invariant_for_addr_expr (t);
4556
4557 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
4558 return NULL_TREE;
4559 }
4560
4561 /* Prepend TO = FROM assignment before *GSI_P. */
4562
4563 static void
4564 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
4565 {
4566 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
4567 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
4568 true, GSI_SAME_STMT);
4569 gimple stmt = gimple_build_assign (to, from);
4570 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
4571 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
4572 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
4573 {
4574 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
4575 gimple_regimplify_operands (stmt, &gsi);
4576 }
4577 }
4578
4579 /* Expand the OpenMP parallel or task directive starting at REGION. */
4580
4581 static void
4582 expand_omp_taskreg (struct omp_region *region)
4583 {
4584 basic_block entry_bb, exit_bb, new_bb;
4585 struct function *child_cfun;
4586 tree child_fn, block, t;
4587 gimple_stmt_iterator gsi;
4588 gimple entry_stmt, stmt;
4589 edge e;
4590 vec<tree, va_gc> *ws_args;
4591
4592 entry_stmt = last_stmt (region->entry);
4593 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
4594 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
4595
4596 entry_bb = region->entry;
4597 exit_bb = region->exit;
4598
4599 if (is_combined_parallel (region))
4600 ws_args = region->ws_args;
4601 else
4602 ws_args = NULL;
4603
4604 if (child_cfun->cfg)
4605 {
4606 /* Due to inlining, it may happen that we have already outlined
4607 the region, in which case all we need to do is make the
4608 sub-graph unreachable and emit the parallel call. */
4609 edge entry_succ_e, exit_succ_e;
4610 gimple_stmt_iterator gsi;
4611
4612 entry_succ_e = single_succ_edge (entry_bb);
4613
4614 gsi = gsi_last_bb (entry_bb);
4615 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
4616 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
4617 gsi_remove (&gsi, true);
4618
4619 new_bb = entry_bb;
4620 if (exit_bb)
4621 {
4622 exit_succ_e = single_succ_edge (exit_bb);
4623 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
4624 }
4625 remove_edge_and_dominated_blocks (entry_succ_e);
4626 }
4627 else
4628 {
4629 unsigned srcidx, dstidx, num;
4630
4631 /* If the parallel region needs data sent from the parent
4632 function, then the very first statement (except possible
4633 tree profile counter updates) of the parallel body
4634 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
4635 &.OMP_DATA_O is passed as an argument to the child function,
4636 we need to replace it with the argument as seen by the child
4637 function.
4638
4639 In most cases, this will end up being the identity assignment
4640 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
4641 a function call that has been inlined, the original PARM_DECL
4642 .OMP_DATA_I may have been converted into a different local
4643 variable. In that case, we need to keep the assignment. */
4644 if (gimple_omp_taskreg_data_arg (entry_stmt))
4645 {
4646 basic_block entry_succ_bb = single_succ (entry_bb);
4647 gimple_stmt_iterator gsi;
4648 tree arg, narg;
4649 gimple parcopy_stmt = NULL;
4650
4651 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
4652 {
4653 gimple stmt;
4654
4655 gcc_assert (!gsi_end_p (gsi));
4656 stmt = gsi_stmt (gsi);
4657 if (gimple_code (stmt) != GIMPLE_ASSIGN)
4658 continue;
4659
4660 if (gimple_num_ops (stmt) == 2)
4661 {
4662 tree arg = gimple_assign_rhs1 (stmt);
4663
4664 /* We ignore the subcode because we're
4665 effectively doing a STRIP_NOPS. */
4666
4667 if (TREE_CODE (arg) == ADDR_EXPR
4668 && TREE_OPERAND (arg, 0)
4669 == gimple_omp_taskreg_data_arg (entry_stmt))
4670 {
4671 parcopy_stmt = stmt;
4672 break;
4673 }
4674 }
4675 }
4676
4677 gcc_assert (parcopy_stmt != NULL);
4678 arg = DECL_ARGUMENTS (child_fn);
4679
4680 if (!gimple_in_ssa_p (cfun))
4681 {
4682 if (gimple_assign_lhs (parcopy_stmt) == arg)
4683 gsi_remove (&gsi, true);
4684 else
4685 {
4686 /* ?? Is setting the subcode really necessary ?? */
4687 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
4688 gimple_assign_set_rhs1 (parcopy_stmt, arg);
4689 }
4690 }
4691 else
4692 {
4693 /* If we are in ssa form, we must load the value from the default
4694 definition of the argument. That should not be defined now,
4695 since the argument is not used uninitialized. */
4696 gcc_assert (ssa_default_def (cfun, arg) == NULL);
4697 narg = make_ssa_name (arg, gimple_build_nop ());
4698 set_ssa_default_def (cfun, arg, narg);
4699 /* ?? Is setting the subcode really necessary ?? */
4700 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
4701 gimple_assign_set_rhs1 (parcopy_stmt, narg);
4702 update_stmt (parcopy_stmt);
4703 }
4704 }
4705
4706 /* Declare local variables needed in CHILD_CFUN. */
4707 block = DECL_INITIAL (child_fn);
4708 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
4709 /* The gimplifier could record temporaries in the parallel/task block
4710 rather than in the containing function's local_decls chain,
4711 which would mean cgraph would miss finalizing them. Do it now. */
4712 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
4713 if (TREE_CODE (t) == VAR_DECL
4714 && TREE_STATIC (t)
4715 && !DECL_EXTERNAL (t))
4716 varpool_finalize_decl (t);
4717 DECL_SAVED_TREE (child_fn) = NULL;
4718 /* We'll create a CFG for child_fn, so no gimple body is needed. */
4719 gimple_set_body (child_fn, NULL);
4720 TREE_USED (block) = 1;
4721
4722 /* Reset DECL_CONTEXT on function arguments. */
4723 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
4724 DECL_CONTEXT (t) = child_fn;
4725
4726 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
4727 so that it can be moved to the child function. */
4728 gsi = gsi_last_bb (entry_bb);
4729 stmt = gsi_stmt (gsi);
4730 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
4731 || gimple_code (stmt) == GIMPLE_OMP_TASK));
4732 gsi_remove (&gsi, true);
4733 e = split_block (entry_bb, stmt);
4734 entry_bb = e->dest;
4735 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4736
4737 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
4738 if (exit_bb)
4739 {
4740 gsi = gsi_last_bb (exit_bb);
4741 gcc_assert (!gsi_end_p (gsi)
4742 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
4743 stmt = gimple_build_return (NULL);
4744 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4745 gsi_remove (&gsi, true);
4746 }
4747
4748 /* Move the parallel region into CHILD_CFUN. */
4749
4750 if (gimple_in_ssa_p (cfun))
4751 {
4752 init_tree_ssa (child_cfun);
4753 init_ssa_operands (child_cfun);
4754 child_cfun->gimple_df->in_ssa_p = true;
4755 block = NULL_TREE;
4756 }
4757 else
4758 block = gimple_block (entry_stmt);
4759
4760 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
4761 if (exit_bb)
4762 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
4763 /* When the OMP expansion process cannot guarantee an up-to-date
4764 loop tree, arrange for the child function to fix up loops. */
4765 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
4766 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
4767
4768 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
4769 num = vec_safe_length (child_cfun->local_decls);
4770 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
4771 {
4772 t = (*child_cfun->local_decls)[srcidx];
4773 if (DECL_CONTEXT (t) == cfun->decl)
4774 continue;
4775 if (srcidx != dstidx)
4776 (*child_cfun->local_decls)[dstidx] = t;
4777 dstidx++;
4778 }
4779 if (dstidx != num)
4780 vec_safe_truncate (child_cfun->local_decls, dstidx);
4781
4782 /* Inform the callgraph about the new function. */
4783 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
4784 cgraph_add_new_function (child_fn, true);
4785
4786 /* Fix the callgraph edges for child_cfun. Those for cfun will be
4787 fixed in a following pass. */
4788 push_cfun (child_cfun);
4789 if (optimize)
4790 optimize_omp_library_calls (entry_stmt);
4791 rebuild_cgraph_edges ();
4792
4793 /* Some EH regions might become dead, see PR34608. If
4794 pass_cleanup_cfg isn't the first pass to happen with the
4795 new child, these dead EH edges might cause problems.
4796 Clean them up now. */
4797 if (flag_exceptions)
4798 {
4799 basic_block bb;
4800 bool changed = false;
4801
4802 FOR_EACH_BB (bb)
4803 changed |= gimple_purge_dead_eh_edges (bb);
4804 if (changed)
4805 cleanup_tree_cfg ();
4806 }
4807 if (gimple_in_ssa_p (cfun))
4808 update_ssa (TODO_update_ssa);
4809 pop_cfun ();
4810 }
4811
4812 /* Emit a library call to launch the children threads. */
4813 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
4814 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
4815 else
4816 expand_task_call (new_bb, entry_stmt);
4817 if (gimple_in_ssa_p (cfun))
4818 update_ssa (TODO_update_ssa_only_virtuals);
4819 }
4820
4821
4822 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
4823 of the combined collapse > 1 loop constructs, generate code like:
4824 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
4825 if (cond3 is <)
4826 adj = STEP3 - 1;
4827 else
4828 adj = STEP3 + 1;
4829 count3 = (adj + N32 - N31) / STEP3;
4830 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
4831 if (cond2 is <)
4832 adj = STEP2 - 1;
4833 else
4834 adj = STEP2 + 1;
4835 count2 = (adj + N22 - N21) / STEP2;
4836 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
4837 if (cond1 is <)
4838 adj = STEP1 - 1;
4839 else
4840 adj = STEP1 + 1;
4841 count1 = (adj + N12 - N11) / STEP1;
4842 count = count1 * count2 * count3;
4843 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
4844 count = 0;
4845 and set ZERO_ITER_BB to that bb. If this isn't the outermost
4846 of the combined loop constructs, just initialize COUNTS array
4847 from the _looptemp_ clauses. */
4848
4849 /* NOTE: It *could* be better to moosh all of the BBs together,
4850 creating one larger BB with all the computation and the unexpected
4851 jump at the end. I.e.
4852
4853 bool zero3, zero2, zero1, zero;
4854
4855 zero3 = N32 c3 N31;
4856 count3 = (N32 - N31) /[cl] STEP3;
4857 zero2 = N22 c2 N21;
4858 count2 = (N22 - N21) /[cl] STEP2;
4859 zero1 = N12 c1 N11;
4860 count1 = (N12 - N11) /[cl] STEP1;
4861 zero = zero3 || zero2 || zero1;
4862 count = count1 * count2 * count3;
4863 if (__builtin_expect(zero, false)) goto zero_iter_bb;
4864
4865 After all, we expect the zero=false, and thus we expect to have to
4866 evaluate all of the comparison expressions, so short-circuiting
4867 oughtn't be a win. Since the condition isn't protecting a
4868 denominator, we're not concerned about divide-by-zero, so we can
4869 fully evaluate count even if a numerator turned out to be wrong.
4870
4871 It seems like putting this all together would create much better
4872 scheduling opportunities, and less pressure on the chip's branch
4873 predictor. */
4874
4875 static void
4876 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
4877 basic_block &entry_bb, tree *counts,
4878 basic_block &zero_iter_bb, int &first_zero_iter,
4879 basic_block &l2_dom_bb)
4880 {
4881 tree t, type = TREE_TYPE (fd->loop.v);
4882 gimple stmt;
4883 edge e, ne;
4884 int i;
4885
4886 /* Collapsed loops need work for expansion into SSA form. */
4887 gcc_assert (!gimple_in_ssa_p (cfun));
4888
4889 if (gimple_omp_for_combined_into_p (fd->for_stmt)
4890 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
4891 {
4892 /* First two _looptemp_ clauses are for istart/iend, counts[0]
4893 isn't supposed to be handled, as the inner loop doesn't
4894 use it. */
4895 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
4896 OMP_CLAUSE__LOOPTEMP_);
4897 gcc_assert (innerc);
4898 for (i = 0; i < fd->collapse; i++)
4899 {
4900 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
4901 OMP_CLAUSE__LOOPTEMP_);
4902 gcc_assert (innerc);
4903 if (i)
4904 counts[i] = OMP_CLAUSE_DECL (innerc);
4905 else
4906 counts[0] = NULL_TREE;
4907 }
4908 return;
4909 }
4910
4911 for (i = 0; i < fd->collapse; i++)
4912 {
4913 tree itype = TREE_TYPE (fd->loops[i].v);
4914
4915 if (SSA_VAR_P (fd->loop.n2)
4916 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
4917 fold_convert (itype, fd->loops[i].n1),
4918 fold_convert (itype, fd->loops[i].n2)))
4919 == NULL_TREE || !integer_onep (t)))
4920 {
4921 tree n1, n2;
4922 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
4923 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
4924 true, GSI_SAME_STMT);
4925 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
4926 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
4927 true, GSI_SAME_STMT);
4928 stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
4929 NULL_TREE, NULL_TREE);
4930 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
4931 if (walk_tree (gimple_cond_lhs_ptr (stmt),
4932 expand_omp_regimplify_p, NULL, NULL)
4933 || walk_tree (gimple_cond_rhs_ptr (stmt),
4934 expand_omp_regimplify_p, NULL, NULL))
4935 {
4936 *gsi = gsi_for_stmt (stmt);
4937 gimple_regimplify_operands (stmt, gsi);
4938 }
4939 e = split_block (entry_bb, stmt);
4940 if (zero_iter_bb == NULL)
4941 {
4942 first_zero_iter = i;
4943 zero_iter_bb = create_empty_bb (entry_bb);
4944 if (current_loops)
4945 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
4946 *gsi = gsi_after_labels (zero_iter_bb);
4947 stmt = gimple_build_assign (fd->loop.n2,
4948 build_zero_cst (type));
4949 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
4950 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
4951 entry_bb);
4952 }
4953 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
4954 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
4955 e->flags = EDGE_TRUE_VALUE;
4956 e->probability = REG_BR_PROB_BASE - ne->probability;
4957 if (l2_dom_bb == NULL)
4958 l2_dom_bb = entry_bb;
4959 entry_bb = e->dest;
4960 *gsi = gsi_last_bb (entry_bb);
4961 }
4962
4963 if (POINTER_TYPE_P (itype))
4964 itype = signed_type_for (itype);
4965 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
4966 ? -1 : 1));
4967 t = fold_build2 (PLUS_EXPR, itype,
4968 fold_convert (itype, fd->loops[i].step), t);
4969 t = fold_build2 (PLUS_EXPR, itype, t,
4970 fold_convert (itype, fd->loops[i].n2));
4971 t = fold_build2 (MINUS_EXPR, itype, t,
4972 fold_convert (itype, fd->loops[i].n1));
4973 /* ?? We could probably use CEIL_DIV_EXPR instead of
4974 TRUNC_DIV_EXPR and adjust by hand. Unless we can't
4975 generate the same code in the end because generically we
4976 don't know that the values involved must be negative for
4977 GT?? */
4978 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
4979 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4980 fold_build1 (NEGATE_EXPR, itype, t),
4981 fold_build1 (NEGATE_EXPR, itype,
4982 fold_convert (itype,
4983 fd->loops[i].step)));
4984 else
4985 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
4986 fold_convert (itype, fd->loops[i].step));
4987 t = fold_convert (type, t);
4988 if (TREE_CODE (t) == INTEGER_CST)
4989 counts[i] = t;
4990 else
4991 {
4992 counts[i] = create_tmp_reg (type, ".count");
4993 expand_omp_build_assign (gsi, counts[i], t);
4994 }
4995 if (SSA_VAR_P (fd->loop.n2))
4996 {
4997 if (i == 0)
4998 t = counts[0];
4999 else
5000 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
5001 expand_omp_build_assign (gsi, fd->loop.n2, t);
5002 }
5003 }
5004 }
5005
5006
5007 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
5008 T = V;
5009 V3 = N31 + (T % count3) * STEP3;
5010 T = T / count3;
5011 V2 = N21 + (T % count2) * STEP2;
5012 T = T / count2;
5013 V1 = N11 + T * STEP1;
5014 if this loop doesn't have an inner loop construct combined with it.
5015 If it does have an inner loop construct combined with it and the
5016 iteration count isn't known constant, store values from counts array
5017 into its _looptemp_ temporaries instead. */
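/* A small worked example (illustrative only): with collapse(2) and
count2 = 5, the flattened iteration number T = 13 decomposes as

V2 = N21 + (13 % 5) * STEP2; (inner index 3)
T = 13 / 5; (now 2)
V1 = N11 + 2 * STEP1; (outer index 2) */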
5018
5019 static void
5020 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
5021 tree *counts, gimple inner_stmt, tree startvar)
5022 {
5023 int i;
5024 if (gimple_omp_for_combined_p (fd->for_stmt))
5025 {
5026 /* If fd->loop.n2 is constant, then no propagation of the counts
5027 is needed; they are constant. */
5028 if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
5029 return;
5030
5031 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
5032 ? gimple_omp_parallel_clauses (inner_stmt)
5033 : gimple_omp_for_clauses (inner_stmt);
5034 /* First two _looptemp_ clauses are for istart/iend, counts[0]
5035 isn't supposed to be handled, as the inner loop doesn't
5036 use it. */
5037 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
5038 gcc_assert (innerc);
5039 for (i = 0; i < fd->collapse; i++)
5040 {
5041 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5042 OMP_CLAUSE__LOOPTEMP_);
5043 gcc_assert (innerc);
5044 if (i)
5045 {
5046 tree tem = OMP_CLAUSE_DECL (innerc);
5047 tree t = fold_convert (TREE_TYPE (tem), counts[i]);
5048 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5049 false, GSI_CONTINUE_LINKING);
5050 gimple stmt = gimple_build_assign (tem, t);
5051 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5052 }
5053 }
5054 return;
5055 }
5056
5057 tree type = TREE_TYPE (fd->loop.v);
5058 tree tem = create_tmp_reg (type, ".tem");
5059 gimple stmt = gimple_build_assign (tem, startvar);
5060 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5061
5062 for (i = fd->collapse - 1; i >= 0; i--)
5063 {
5064 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
5065 itype = vtype;
5066 if (POINTER_TYPE_P (vtype))
5067 itype = signed_type_for (vtype);
5068 if (i != 0)
5069 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
5070 else
5071 t = tem;
5072 t = fold_convert (itype, t);
5073 t = fold_build2 (MULT_EXPR, itype, t,
5074 fold_convert (itype, fd->loops[i].step));
5075 if (POINTER_TYPE_P (vtype))
5076 t = fold_build_pointer_plus (fd->loops[i].n1, t);
5077 else
5078 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
5079 t = force_gimple_operand_gsi (gsi, t,
5080 DECL_P (fd->loops[i].v)
5081 && TREE_ADDRESSABLE (fd->loops[i].v),
5082 NULL_TREE, false,
5083 GSI_CONTINUE_LINKING);
5084 stmt = gimple_build_assign (fd->loops[i].v, t);
5085 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5086 if (i != 0)
5087 {
5088 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
5089 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
5090 false, GSI_CONTINUE_LINKING);
5091 stmt = gimple_build_assign (tem, t);
5092 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
5093 }
5094 }
5095 }
5096
5097
5098 /* Helper function for expand_omp_for_*. Generate code like:
5099 L10:
5100 V3 += STEP3;
5101 if (V3 cond3 N32) goto BODY_BB; else goto L11;
5102 L11:
5103 V3 = N31;
5104 V2 += STEP2;
5105 if (V2 cond2 N22) goto BODY_BB; else goto L12;
5106 L12:
5107 V2 = N21;
5108 V1 += STEP1;
5109 goto BODY_BB; */
5110
5111 static basic_block
5112 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
5113 basic_block body_bb)
5114 {
5115 basic_block last_bb, bb, collapse_bb = NULL;
5116 int i;
5117 gimple_stmt_iterator gsi;
5118 edge e;
5119 tree t;
5120 gimple stmt;
5121
5122 last_bb = cont_bb;
5123 for (i = fd->collapse - 1; i >= 0; i--)
5124 {
5125 tree vtype = TREE_TYPE (fd->loops[i].v);
5126
5127 bb = create_empty_bb (last_bb);
5128 if (current_loops)
5129 add_bb_to_loop (bb, last_bb->loop_father);
5130 gsi = gsi_start_bb (bb);
5131
5132 if (i < fd->collapse - 1)
5133 {
5134 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
5135 e->probability = REG_BR_PROB_BASE / 8;
5136
5137 t = fd->loops[i + 1].n1;
5138 t = force_gimple_operand_gsi (&gsi, t,
5139 DECL_P (fd->loops[i + 1].v)
5140 && TREE_ADDRESSABLE (fd->loops[i
5141 + 1].v),
5142 NULL_TREE, false,
5143 GSI_CONTINUE_LINKING);
5144 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
5145 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5146 }
5147 else
5148 collapse_bb = bb;
5149
5150 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
5151
5152 if (POINTER_TYPE_P (vtype))
5153 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
5154 else
5155 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
5156 t = force_gimple_operand_gsi (&gsi, t,
5157 DECL_P (fd->loops[i].v)
5158 && TREE_ADDRESSABLE (fd->loops[i].v),
5159 NULL_TREE, false, GSI_CONTINUE_LINKING);
5160 stmt = gimple_build_assign (fd->loops[i].v, t);
5161 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5162
5163 if (i > 0)
5164 {
5165 t = fd->loops[i].n2;
5166 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5167 false, GSI_CONTINUE_LINKING);
5168 tree v = fd->loops[i].v;
5169 if (DECL_P (v) && TREE_ADDRESSABLE (v))
5170 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
5171 false, GSI_CONTINUE_LINKING);
5172 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
5173 stmt = gimple_build_cond_empty (t);
5174 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5175 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
5176 e->probability = REG_BR_PROB_BASE * 7 / 8;
5177 }
5178 else
5179 make_edge (bb, body_bb, EDGE_FALLTHRU);
5180 last_bb = bb;
5181 }
5182
5183 return collapse_bb;
5184 }
5185
5186
5187 /* A subroutine of expand_omp_for. Generate code for a parallel
5188 loop with any schedule. Given parameters:
5189
5190 for (V = N1; V cond N2; V += STEP) BODY;
5191
5192 where COND is "<" or ">", we generate pseudocode
5193
5194 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
5195 if (more) goto L0; else goto L3;
5196 L0:
5197 V = istart0;
5198 iend = iend0;
5199 L1:
5200 BODY;
5201 V += STEP;
5202 if (V cond iend) goto L1; else goto L2;
5203 L2:
5204 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5205 L3:
5206
5207 If this is a combined omp parallel loop, instead of the call to
5208 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
5209 If this is gimple_omp_for_combined_p loop, then instead of assigning
5210 V and iend in L0 we assign the first two _looptemp_ clause decls of the
5211 inner GIMPLE_OMP_FOR and V += STEP; and
5212 if (V cond iend) goto L1; else goto L2; are removed.
5213
5214 For collapsed loops, given parameters:
5215 collapse(3)
5216 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
5217 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
5218 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
5219 BODY;
5220
5221 we generate pseudocode
5222
5223 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
5224 if (cond3 is <)
5225 adj = STEP3 - 1;
5226 else
5227 adj = STEP3 + 1;
5228 count3 = (adj + N32 - N31) / STEP3;
5229 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
5230 if (cond2 is <)
5231 adj = STEP2 - 1;
5232 else
5233 adj = STEP2 + 1;
5234 count2 = (adj + N22 - N21) / STEP2;
5235 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
5236 if (cond1 is <)
5237 adj = STEP1 - 1;
5238 else
5239 adj = STEP1 + 1;
5240 count1 = (adj + N12 - N11) / STEP1;
5241 count = count1 * count2 * count3;
5242 goto Z1;
5243 Z0:
5244 count = 0;
5245 Z1:
5246 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
5247 if (more) goto L0; else goto L3;
5248 L0:
5249 V = istart0;
5250 T = V;
5251 V3 = N31 + (T % count3) * STEP3;
5252 T = T / count3;
5253 V2 = N21 + (T % count2) * STEP2;
5254 T = T / count2;
5255 V1 = N11 + T * STEP1;
5256 iend = iend0;
5257 L1:
5258 BODY;
5259 V += 1;
5260 if (V < iend) goto L10; else goto L2;
5261 L10:
5262 V3 += STEP3;
5263 if (V3 cond3 N32) goto L1; else goto L11;
5264 L11:
5265 V3 = N31;
5266 V2 += STEP2;
5267 if (V2 cond2 N22) goto L1; else goto L12;
5268 L12:
5269 V2 = N21;
5270 V1 += STEP1;
5271 goto L1;
5272 L2:
5273 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
5274 L3:
5275
5276 */
5277
5278 static void
5279 expand_omp_for_generic (struct omp_region *region,
5280 struct omp_for_data *fd,
5281 enum built_in_function start_fn,
5282 enum built_in_function next_fn,
5283 gimple inner_stmt)
5284 {
5285 tree type, istart0, iend0, iend;
5286 tree t, vmain, vback, bias = NULL_TREE;
5287 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
5288 basic_block l2_bb = NULL, l3_bb = NULL;
5289 gimple_stmt_iterator gsi;
5290 gimple stmt;
5291 bool in_combined_parallel = is_combined_parallel (region);
5292 bool broken_loop = region->cont == NULL;
5293 edge e, ne;
5294 tree *counts = NULL;
5295 int i;
5296
5297 gcc_assert (!broken_loop || !in_combined_parallel);
5298 gcc_assert (fd->iter_type == long_integer_type_node
5299 || !in_combined_parallel);
5300
5301 type = TREE_TYPE (fd->loop.v);
5302 istart0 = create_tmp_var (fd->iter_type, ".istart0");
5303 iend0 = create_tmp_var (fd->iter_type, ".iend0");
5304 TREE_ADDRESSABLE (istart0) = 1;
5305 TREE_ADDRESSABLE (iend0) = 1;
5306
5307 /* See if we need to bias by LLONG_MIN. */
5308 if (fd->iter_type == long_long_unsigned_type_node
5309 && TREE_CODE (type) == INTEGER_TYPE
5310 && !TYPE_UNSIGNED (type))
5311 {
5312 tree n1, n2;
5313
5314 if (fd->loop.cond_code == LT_EXPR)
5315 {
5316 n1 = fd->loop.n1;
5317 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
5318 }
5319 else
5320 {
5321 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
5322 n2 = fd->loop.n1;
5323 }
5324 if (TREE_CODE (n1) != INTEGER_CST
5325 || TREE_CODE (n2) != INTEGER_CST
5326 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
5327 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
5328 }
5329
5330 entry_bb = region->entry;
5331 cont_bb = region->cont;
5332 collapse_bb = NULL;
5333 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5334 gcc_assert (broken_loop
5335 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
5336 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
5337 l1_bb = single_succ (l0_bb);
5338 if (!broken_loop)
5339 {
5340 l2_bb = create_empty_bb (cont_bb);
5341 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
5342 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5343 }
5344 else
5345 l2_bb = NULL;
5346 l3_bb = BRANCH_EDGE (entry_bb)->dest;
5347 exit_bb = region->exit;
5348
5349 gsi = gsi_last_bb (entry_bb);
5350
5351 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5352 if (fd->collapse > 1)
5353 {
5354 int first_zero_iter = -1;
5355 basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;
5356
5357 counts = XALLOCAVEC (tree, fd->collapse);
5358 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5359 zero_iter_bb, first_zero_iter,
5360 l2_dom_bb);
5361
5362 if (zero_iter_bb)
5363 {
5364 /* Some counts[i] vars might be uninitialized if
5365 some loop has zero iterations. But the body shouldn't
5366 be executed in that case, so just avoid uninit warnings. */
5367 for (i = first_zero_iter; i < fd->collapse; i++)
5368 if (SSA_VAR_P (counts[i]))
5369 TREE_NO_WARNING (counts[i]) = 1;
5370 gsi_prev (&gsi);
5371 e = split_block (entry_bb, gsi_stmt (gsi));
5372 entry_bb = e->dest;
5373 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
5374 gsi = gsi_last_bb (entry_bb);
5375 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
5376 get_immediate_dominator (CDI_DOMINATORS,
5377 zero_iter_bb));
5378 }
5379 }
5380 if (in_combined_parallel)
5381 {
5382 /* In a combined parallel loop, emit a call to
5383 GOMP_loop_foo_next. */
5384 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5385 build_fold_addr_expr (istart0),
5386 build_fold_addr_expr (iend0));
5387 }
5388 else
5389 {
5390 tree t0, t1, t2, t3, t4;
5391 /* If this is not a combined parallel loop, emit a call to
5392 GOMP_loop_foo_start in ENTRY_BB. */
5393 t4 = build_fold_addr_expr (iend0);
5394 t3 = build_fold_addr_expr (istart0);
5395 t2 = fold_convert (fd->iter_type, fd->loop.step);
5396 t1 = fd->loop.n2;
5397 t0 = fd->loop.n1;
5398 if (gimple_omp_for_combined_into_p (fd->for_stmt))
5399 {
5400 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5401 OMP_CLAUSE__LOOPTEMP_);
5402 gcc_assert (innerc);
5403 t0 = OMP_CLAUSE_DECL (innerc);
5404 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5405 OMP_CLAUSE__LOOPTEMP_);
5406 gcc_assert (innerc);
5407 t1 = OMP_CLAUSE_DECL (innerc);
5408 }
5409 if (POINTER_TYPE_P (TREE_TYPE (t0))
5410 && TYPE_PRECISION (TREE_TYPE (t0))
5411 != TYPE_PRECISION (fd->iter_type))
5412 {
5413 /* Avoid casting pointers to an integer of a different size. */
5414 tree itype = signed_type_for (type);
5415 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
5416 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
5417 }
5418 else
5419 {
5420 t1 = fold_convert (fd->iter_type, t1);
5421 t0 = fold_convert (fd->iter_type, t0);
5422 }
5423 if (bias)
5424 {
5425 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
5426 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
5427 }
5428 if (fd->iter_type == long_integer_type_node)
5429 {
5430 if (fd->chunk_size)
5431 {
5432 t = fold_convert (fd->iter_type, fd->chunk_size);
5433 t = build_call_expr (builtin_decl_explicit (start_fn),
5434 6, t0, t1, t2, t, t3, t4);
5435 }
5436 else
5437 t = build_call_expr (builtin_decl_explicit (start_fn),
5438 5, t0, t1, t2, t3, t4);
5439 }
5440 else
5441 {
5442 tree t5;
5443 tree c_bool_type;
5444 tree bfn_decl;
5445
5446 /* The GOMP_loop_ull_*start functions have an additional boolean
5447 argument, true for < loops and false for > loops.
5448 In Fortran, the C bool type can be different from
5449 boolean_type_node. */
5450 bfn_decl = builtin_decl_explicit (start_fn);
5451 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
5452 t5 = build_int_cst (c_bool_type,
5453 fd->loop.cond_code == LT_EXPR ? 1 : 0);
5454 if (fd->chunk_size)
5455 {
5456 tree bfn_decl = builtin_decl_explicit (start_fn);
5457 t = fold_convert (fd->iter_type, fd->chunk_size);
5458 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
5459 }
5460 else
5461 t = build_call_expr (builtin_decl_explicit (start_fn),
5462 6, t5, t0, t1, t2, t3, t4);
5463 }
5464 }
5465 if (TREE_TYPE (t) != boolean_type_node)
5466 t = fold_build2 (NE_EXPR, boolean_type_node,
5467 t, build_int_cst (TREE_TYPE (t), 0));
5468 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5469 true, GSI_SAME_STMT);
5470 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5471
5472 /* Remove the GIMPLE_OMP_FOR statement. */
5473 gsi_remove (&gsi, true);
5474
5475 /* Iteration setup for sequential loop goes in L0_BB. */
5476 tree startvar = fd->loop.v;
5477 tree endvar = NULL_TREE;
5478
5479 if (gimple_omp_for_combined_p (fd->for_stmt))
5480 {
5481 gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
5482 && gimple_omp_for_kind (inner_stmt)
5483 == GF_OMP_FOR_KIND_SIMD);
5484 tree innerc = find_omp_clause (gimple_omp_for_clauses (inner_stmt),
5485 OMP_CLAUSE__LOOPTEMP_);
5486 gcc_assert (innerc);
5487 startvar = OMP_CLAUSE_DECL (innerc);
5488 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5489 OMP_CLAUSE__LOOPTEMP_);
5490 gcc_assert (innerc);
5491 endvar = OMP_CLAUSE_DECL (innerc);
5492 }
5493
5494 gsi = gsi_start_bb (l0_bb);
5495 t = istart0;
5496 if (bias)
5497 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5498 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5499 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5500 t = fold_convert (TREE_TYPE (startvar), t);
5501 t = force_gimple_operand_gsi (&gsi, t,
5502 DECL_P (startvar)
5503 && TREE_ADDRESSABLE (startvar),
5504 NULL_TREE, false, GSI_CONTINUE_LINKING);
5505 stmt = gimple_build_assign (startvar, t);
5506 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5507
5508 t = iend0;
5509 if (bias)
5510 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
5511 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
5512 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
5513 t = fold_convert (TREE_TYPE (startvar), t);
5514 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5515 false, GSI_CONTINUE_LINKING);
5516 if (endvar)
5517 {
5518 stmt = gimple_build_assign (endvar, iend);
5519 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5520 }
5521 if (fd->collapse > 1)
5522 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
5523
5524 if (!broken_loop)
5525 {
5526 /* Code to control the increment and predicate for the sequential
5527 loop goes in the CONT_BB. */
5528 gsi = gsi_last_bb (cont_bb);
5529 stmt = gsi_stmt (gsi);
5530 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5531 vmain = gimple_omp_continue_control_use (stmt);
5532 vback = gimple_omp_continue_control_def (stmt);
5533
5534 if (!gimple_omp_for_combined_p (fd->for_stmt))
5535 {
5536 if (POINTER_TYPE_P (type))
5537 t = fold_build_pointer_plus (vmain, fd->loop.step);
5538 else
5539 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
5540 t = force_gimple_operand_gsi (&gsi, t,
5541 DECL_P (vback)
5542 && TREE_ADDRESSABLE (vback),
5543 NULL_TREE, true, GSI_SAME_STMT);
5544 stmt = gimple_build_assign (vback, t);
5545 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5546
5547 t = build2 (fd->loop.cond_code, boolean_type_node,
5548 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
5549 iend);
5550 stmt = gimple_build_cond_empty (t);
5551 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5552 }
5553
5554 /* Remove GIMPLE_OMP_CONTINUE. */
5555 gsi_remove (&gsi, true);
5556
5557 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
5558 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
5559
5560 /* Emit code to get the next parallel iteration in L2_BB. */
5561 gsi = gsi_start_bb (l2_bb);
5562
5563 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
5564 build_fold_addr_expr (istart0),
5565 build_fold_addr_expr (iend0));
5566 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5567 false, GSI_CONTINUE_LINKING);
5568 if (TREE_TYPE (t) != boolean_type_node)
5569 t = fold_build2 (NE_EXPR, boolean_type_node,
5570 t, build_int_cst (TREE_TYPE (t), 0));
5571 stmt = gimple_build_cond_empty (t);
5572 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5573 }
5574
5575 /* Add the loop cleanup function. */
5576 gsi = gsi_last_bb (exit_bb);
5577 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
5578 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
5579 else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5580 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
5581 else
5582 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
5583 stmt = gimple_build_call (t, 0);
5584 if (gimple_omp_return_lhs (gsi_stmt (gsi)))
5585 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
5586 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
5587 gsi_remove (&gsi, true);
5588
5589 /* Connect the new blocks. */
5590 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
5591 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
5592
5593 if (!broken_loop)
5594 {
5595 gimple_seq phis;
5596
5597 e = find_edge (cont_bb, l3_bb);
5598 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
5599
5600 phis = phi_nodes (l3_bb);
5601 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
5602 {
5603 gimple phi = gsi_stmt (gsi);
5604 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
5605 PHI_ARG_DEF_FROM_EDGE (phi, e));
5606 }
5607 remove_edge (e);
5608
5609 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
5610 if (current_loops)
5611 add_bb_to_loop (l2_bb, cont_bb->loop_father);
5612 e = find_edge (cont_bb, l1_bb);
5613 if (gimple_omp_for_combined_p (fd->for_stmt))
5614 {
5615 remove_edge (e);
5616 e = NULL;
5617 }
5618 else if (fd->collapse > 1)
5619 {
5620 remove_edge (e);
5621 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
5622 }
5623 else
5624 e->flags = EDGE_TRUE_VALUE;
5625 if (e)
5626 {
5627 e->probability = REG_BR_PROB_BASE * 7 / 8;
5628 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
5629 }
5630 else
5631 {
5632 e = find_edge (cont_bb, l2_bb);
5633 e->flags = EDGE_FALLTHRU;
5634 }
5635 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
5636
5637 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
5638 recompute_dominator (CDI_DOMINATORS, l2_bb));
5639 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
5640 recompute_dominator (CDI_DOMINATORS, l3_bb));
5641 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
5642 recompute_dominator (CDI_DOMINATORS, l0_bb));
5643 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
5644 recompute_dominator (CDI_DOMINATORS, l1_bb));
5645
5646 struct loop *outer_loop = alloc_loop ();
5647 outer_loop->header = l0_bb;
5648 outer_loop->latch = l2_bb;
5649 add_loop (outer_loop, l0_bb->loop_father);
5650
5651 if (!gimple_omp_for_combined_p (fd->for_stmt))
5652 {
5653 struct loop *loop = alloc_loop ();
5654 loop->header = l1_bb;
5655 /* The loop may have multiple latches. */
5656 add_loop (loop, outer_loop);
5657 }
5658 }
5659 }
5660
5661
5662 /* A subroutine of expand_omp_for. Generate code for a parallel
5663 loop with static schedule and no specified chunk size. Given
5664 parameters:
5665
5666 for (V = N1; V cond N2; V += STEP) BODY;
5667
5668 where COND is "<" or ">", we generate pseudocode
5669
5670 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
5671 if (cond is <)
5672 adj = STEP - 1;
5673 else
5674 adj = STEP + 1;
5675 if ((__typeof (V)) -1 > 0 && cond is >)
5676 n = -(adj + N2 - N1) / -STEP;
5677 else
5678 n = (adj + N2 - N1) / STEP;
5679 q = n / nthreads;
5680 tt = n % nthreads;
5681 if (threadid < tt) goto L3; else goto L4;
5682 L3:
5683 tt = 0;
5684 q = q + 1;
5685 L4:
5686 s0 = q * threadid + tt;
5687 e0 = s0 + q;
5688 V = s0 * STEP + N1;
5689 if (s0 >= e0) goto L2; else goto L0;
5690 L0:
5691 e = e0 * STEP + N1;
5692 L1:
5693 BODY;
5694 V += STEP;
5695 if (V cond e) goto L1;
5696 L2:
5697 */
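/* A worked example of the partitioning above (illustrative numbers only):
   with n = 10 iterations and nthreads = 4, q = 10 / 4 = 2 and
   tt = 10 % 4 = 2, so threads 0 and 1 take q + 1 = 3 iterations each
   ([0,3) and [3,6)) while threads 2 and 3 take 2 each ([6,8) and [8,10)).
   Every iteration is assigned exactly once and the per-thread counts
   differ by at most one.  */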
5698
5699 static void
5700 expand_omp_for_static_nochunk (struct omp_region *region,
5701 struct omp_for_data *fd,
5702 gimple inner_stmt)
5703 {
5704 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
5705 tree type, itype, vmain, vback;
5706 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
5707 basic_block body_bb, cont_bb, collapse_bb = NULL;
5708 basic_block fin_bb;
5709 gimple_stmt_iterator gsi;
5710 gimple stmt;
5711 edge ep;
5712 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
5713 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
5714 bool broken_loop = region->cont == NULL;
5715 tree *counts = NULL;
5716 tree n1, n2, step;
5717
5718 itype = type = TREE_TYPE (fd->loop.v);
5719 if (POINTER_TYPE_P (type))
5720 itype = signed_type_for (type);
5721
5722 entry_bb = region->entry;
5723 cont_bb = region->cont;
5724 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5725 fin_bb = BRANCH_EDGE (entry_bb)->dest;
5726 gcc_assert (broken_loop
5727 || (fin_bb == FALLTHRU_EDGE (cont_bb)->dest));
5728 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
5729 body_bb = single_succ (seq_start_bb);
5730 if (!broken_loop)
5731 {
5732 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
5733 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5734 }
5735 exit_bb = region->exit;
5736
5737 /* Iteration space partitioning goes in ENTRY_BB. */
5738 gsi = gsi_last_bb (entry_bb);
5739 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5740
5741 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
5742 {
5743 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
5744 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
5745 }
5746
5747 if (fd->collapse > 1)
5748 {
5749 int first_zero_iter = -1;
5750 basic_block l2_dom_bb = NULL;
5751
5752 counts = XALLOCAVEC (tree, fd->collapse);
5753 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5754 fin_bb, first_zero_iter,
5755 l2_dom_bb);
5756 t = NULL_TREE;
5757 }
5758 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
5759 t = integer_one_node;
5760 else
5761 t = fold_binary (fd->loop.cond_code, boolean_type_node,
5762 fold_convert (type, fd->loop.n1),
5763 fold_convert (type, fd->loop.n2));
5764 if (fd->collapse == 1
5765 && TYPE_UNSIGNED (type)
5766 && (t == NULL_TREE || !integer_onep (t)))
5767 {
5768 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
5769 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
5770 true, GSI_SAME_STMT);
5771 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
5772 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
5773 true, GSI_SAME_STMT);
5774 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
5775 NULL_TREE, NULL_TREE);
5776 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5777 if (walk_tree (gimple_cond_lhs_ptr (stmt),
5778 expand_omp_regimplify_p, NULL, NULL)
5779 || walk_tree (gimple_cond_rhs_ptr (stmt),
5780 expand_omp_regimplify_p, NULL, NULL))
5781 {
5782 gsi = gsi_for_stmt (stmt);
5783 gimple_regimplify_operands (stmt, &gsi);
5784 }
5785 ep = split_block (entry_bb, stmt);
5786 ep->flags = EDGE_TRUE_VALUE;
5787 entry_bb = ep->dest;
5788 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
5789 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
5790 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
5791 if (gimple_in_ssa_p (cfun))
5792 {
5793 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
5794 for (gsi = gsi_start_phis (fin_bb);
5795 !gsi_end_p (gsi); gsi_next (&gsi))
5796 {
5797 gimple phi = gsi_stmt (gsi);
5798 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
5799 ep, UNKNOWN_LOCATION);
5800 }
5801 }
5802 gsi = gsi_last_bb (entry_bb);
5803 }
5804
5805 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
5806 t = fold_convert (itype, t);
5807 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5808 true, GSI_SAME_STMT);
5809
5810 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
5811 t = fold_convert (itype, t);
5812 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5813 true, GSI_SAME_STMT);
5814
5815 n1 = fd->loop.n1;
5816 n2 = fd->loop.n2;
5817 step = fd->loop.step;
5818 if (gimple_omp_for_combined_into_p (fd->for_stmt))
5819 {
5820 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5821 OMP_CLAUSE__LOOPTEMP_);
5822 gcc_assert (innerc);
5823 n1 = OMP_CLAUSE_DECL (innerc);
5824 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5825 OMP_CLAUSE__LOOPTEMP_);
5826 gcc_assert (innerc);
5827 n2 = OMP_CLAUSE_DECL (innerc);
5828 }
5829 n1 = force_gimple_operand_gsi (&gsi, fold_convert (type, n1),
5830 true, NULL_TREE, true, GSI_SAME_STMT);
5831 n2 = force_gimple_operand_gsi (&gsi, fold_convert (itype, n2),
5832 true, NULL_TREE, true, GSI_SAME_STMT);
5833 step = force_gimple_operand_gsi (&gsi, fold_convert (itype, step),
5834 true, NULL_TREE, true, GSI_SAME_STMT);
5835
5836 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
5837 t = fold_build2 (PLUS_EXPR, itype, step, t);
5838 t = fold_build2 (PLUS_EXPR, itype, t, n2);
5839 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
5840 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
5841 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5842 fold_build1 (NEGATE_EXPR, itype, t),
5843 fold_build1 (NEGATE_EXPR, itype, step));
5844 else
5845 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
5846 t = fold_convert (itype, t);
5847 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
5848
5849 q = create_tmp_reg (itype, "q");
5850 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
5851 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
5852 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
5853
5854 tt = create_tmp_reg (itype, "tt");
5855 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
5856 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
5857 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
5858
5859 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
5860 stmt = gimple_build_cond_empty (t);
5861 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5862
5863 second_bb = split_block (entry_bb, stmt)->dest;
5864 gsi = gsi_last_bb (second_bb);
5865 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5866
5867 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
5868 GSI_SAME_STMT);
5869 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
5870 build_int_cst (itype, 1));
5871 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5872
5873 third_bb = split_block (second_bb, stmt)->dest;
5874 gsi = gsi_last_bb (third_bb);
5875 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5876
5877 t = build2 (MULT_EXPR, itype, q, threadid);
5878 t = build2 (PLUS_EXPR, itype, t, tt);
5879 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
5880
5881 t = fold_build2 (PLUS_EXPR, itype, s0, q);
5882 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
5883
5884 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
5885 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5886
5887 /* Remove the GIMPLE_OMP_FOR statement. */
5888 gsi_remove (&gsi, true);
5889
5890 /* Setup code for sequential iteration goes in SEQ_START_BB. */
5891 gsi = gsi_start_bb (seq_start_bb);
5892
5893 tree startvar = fd->loop.v;
5894 tree endvar = NULL_TREE;
5895
5896 if (gimple_omp_for_combined_p (fd->for_stmt))
5897 {
5898 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
5899 ? gimple_omp_parallel_clauses (inner_stmt)
5900 : gimple_omp_for_clauses (inner_stmt);
5901 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
5902 gcc_assert (innerc);
5903 startvar = OMP_CLAUSE_DECL (innerc);
5904 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
5905 OMP_CLAUSE__LOOPTEMP_);
5906 gcc_assert (innerc);
5907 endvar = OMP_CLAUSE_DECL (innerc);
5908 }
5909 t = fold_convert (itype, s0);
5910 t = fold_build2 (MULT_EXPR, itype, t, step);
5911 if (POINTER_TYPE_P (type))
5912 t = fold_build_pointer_plus (n1, t);
5913 else
5914 t = fold_build2 (PLUS_EXPR, type, t, n1);
5915 t = fold_convert (TREE_TYPE (startvar), t);
5916 t = force_gimple_operand_gsi (&gsi, t,
5917 DECL_P (startvar)
5918 && TREE_ADDRESSABLE (startvar),
5919 NULL_TREE, false, GSI_CONTINUE_LINKING);
5920 stmt = gimple_build_assign (startvar, t);
5921 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5922
5923 t = fold_convert (itype, e0);
5924 t = fold_build2 (MULT_EXPR, itype, t, step);
5925 if (POINTER_TYPE_P (type))
5926 t = fold_build_pointer_plus (n1, t);
5927 else
5928 t = fold_build2 (PLUS_EXPR, type, t, n1);
5929 t = fold_convert (TREE_TYPE (startvar), t);
5930 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5931 false, GSI_CONTINUE_LINKING);
5932 if (endvar)
5933 {
5934 stmt = gimple_build_assign (endvar, e);
5935 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5936 }
5937 if (fd->collapse > 1)
5938 expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);
5939
5940 if (!broken_loop)
5941 {
5942 /* The code controlling the sequential loop replaces the
5943 GIMPLE_OMP_CONTINUE. */
5944 gsi = gsi_last_bb (cont_bb);
5945 stmt = gsi_stmt (gsi);
5946 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5947 vmain = gimple_omp_continue_control_use (stmt);
5948 vback = gimple_omp_continue_control_def (stmt);
5949
5950 if (!gimple_omp_for_combined_p (fd->for_stmt))
5951 {
5952 if (POINTER_TYPE_P (type))
5953 t = fold_build_pointer_plus (vmain, step);
5954 else
5955 t = fold_build2 (PLUS_EXPR, type, vmain, step);
5956 t = force_gimple_operand_gsi (&gsi, t,
5957 DECL_P (vback)
5958 && TREE_ADDRESSABLE (vback),
5959 NULL_TREE, true, GSI_SAME_STMT);
5960 stmt = gimple_build_assign (vback, t);
5961 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5962
5963 t = build2 (fd->loop.cond_code, boolean_type_node,
5964 DECL_P (vback) && TREE_ADDRESSABLE (vback)
5965 ? t : vback, e);
5966 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5967 }
5968
5969 /* Remove the GIMPLE_OMP_CONTINUE statement. */
5970 gsi_remove (&gsi, true);
5971
5972 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
5973 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
5974 }
5975
5976 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
5977 gsi = gsi_last_bb (exit_bb);
5978 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
5979 {
5980 t = gimple_omp_return_lhs (gsi_stmt (gsi));
5981 gsi_insert_after (&gsi, build_omp_barrier (t), GSI_SAME_STMT);
5982 }
5983 gsi_remove (&gsi, true);
5984
5985 /* Connect all the blocks. */
5986 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
5987 ep->probability = REG_BR_PROB_BASE / 4 * 3;
5988 ep = find_edge (entry_bb, second_bb);
5989 ep->flags = EDGE_TRUE_VALUE;
5990 ep->probability = REG_BR_PROB_BASE / 4;
5991 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
5992 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
5993
5994 if (!broken_loop)
5995 {
5996 ep = find_edge (cont_bb, body_bb);
5997 if (gimple_omp_for_combined_p (fd->for_stmt))
5998 {
5999 remove_edge (ep);
6000 ep = NULL;
6001 }
6002 else if (fd->collapse > 1)
6003 {
6004 remove_edge (ep);
6005 ep = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6006 }
6007 else
6008 ep->flags = EDGE_TRUE_VALUE;
6009 find_edge (cont_bb, fin_bb)->flags
6010 = ep ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6011 }
6012
6013 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
6014 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
6015 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
6016
6017 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6018 recompute_dominator (CDI_DOMINATORS, body_bb));
6019 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6020 recompute_dominator (CDI_DOMINATORS, fin_bb));
6021
6022 if (!broken_loop && !gimple_omp_for_combined_p (fd->for_stmt))
6023 {
6024 struct loop *loop = alloc_loop ();
6025 loop->header = body_bb;
6026 if (collapse_bb == NULL)
6027 loop->latch = cont_bb;
6028 add_loop (loop, body_bb->loop_father);
6029 }
6030 }
6031
6032
6033 /* A subroutine of expand_omp_for. Generate code for a parallel
6034 loop with static schedule and a specified chunk size. Given
6035 parameters:
6036
6037 for (V = N1; V cond N2; V += STEP) BODY;
6038
6039 where COND is "<" or ">", we generate pseudocode
6040
6041 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
6042 if (cond is <)
6043 adj = STEP - 1;
6044 else
6045 adj = STEP + 1;
6046 if ((__typeof (V)) -1 > 0 && cond is >)
6047 n = -(adj + N2 - N1) / -STEP;
6048 else
6049 n = (adj + N2 - N1) / STEP;
6050 trip = 0;
6051 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
6052 here so that V is defined
6053 if the loop is not entered
6054 L0:
6055 s0 = (trip * nthreads + threadid) * CHUNK;
6056 e0 = min(s0 + CHUNK, n);
6057 if (s0 < n) goto L1; else goto L4;
6058 L1:
6059 V = s0 * STEP + N1;
6060 e = e0 * STEP + N1;
6061 L2:
6062 BODY;
6063 V += STEP;
6064 if (V cond e) goto L2; else goto L3;
6065 L3:
6066 trip += 1;
6067 goto L0;
6068 L4:
6069 */
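/* A worked example of the trip-based partitioning above (illustrative
   numbers only): with n = 10, nthreads = 2 and CHUNK = 3, thread 0 runs
   [0,3) on trip 0 and [6,9) on trip 1, thread 1 runs [3,6) on trip 0 and
   [9,10) on trip 1; on trip 2 both threads compute s0 >= n and branch to
   L4.  */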
6070
6071 static void
6072 expand_omp_for_static_chunk (struct omp_region *region,
6073 struct omp_for_data *fd, gimple inner_stmt)
6074 {
6075 tree n, s0, e0, e, t;
6076 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
6077 tree type, itype, v_main, v_back, v_extra;
6078 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
6079 basic_block trip_update_bb = NULL, cont_bb, collapse_bb = NULL, fin_bb;
6080 gimple_stmt_iterator si;
6081 gimple stmt;
6082 edge se;
6083 enum built_in_function get_num_threads = BUILT_IN_OMP_GET_NUM_THREADS;
6084 enum built_in_function get_thread_num = BUILT_IN_OMP_GET_THREAD_NUM;
6085 bool broken_loop = region->cont == NULL;
6086 tree *counts = NULL;
6087 tree n1, n2, step;
6088
6089 itype = type = TREE_TYPE (fd->loop.v);
6090 if (POINTER_TYPE_P (type))
6091 itype = signed_type_for (type);
6092
6093 entry_bb = region->entry;
6094 se = split_block (entry_bb, last_stmt (entry_bb));
6095 entry_bb = se->src;
6096 iter_part_bb = se->dest;
6097 cont_bb = region->cont;
6098 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
6099 fin_bb = BRANCH_EDGE (iter_part_bb)->dest;
6100 gcc_assert (broken_loop
6101 || fin_bb == FALLTHRU_EDGE (cont_bb)->dest);
6102 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
6103 body_bb = single_succ (seq_start_bb);
6104 if (!broken_loop)
6105 {
6106 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
6107 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6108 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
6109 }
6110 exit_bb = region->exit;
6111
6112 /* Trip and adjustment setup goes in ENTRY_BB. */
6113 si = gsi_last_bb (entry_bb);
6114 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
6115
6116 if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
6117 {
6118 get_num_threads = BUILT_IN_OMP_GET_NUM_TEAMS;
6119 get_thread_num = BUILT_IN_OMP_GET_TEAM_NUM;
6120 }
6121
6122 if (fd->collapse > 1)
6123 {
6124 int first_zero_iter = -1;
6125 basic_block l2_dom_bb = NULL;
6126
6127 counts = XALLOCAVEC (tree, fd->collapse);
6128 expand_omp_for_init_counts (fd, &si, entry_bb, counts,
6129 fin_bb, first_zero_iter,
6130 l2_dom_bb);
6131 t = NULL_TREE;
6132 }
6133 else if (gimple_omp_for_combined_into_p (fd->for_stmt))
6134 t = integer_one_node;
6135 else
6136 t = fold_binary (fd->loop.cond_code, boolean_type_node,
6137 fold_convert (type, fd->loop.n1),
6138 fold_convert (type, fd->loop.n2));
6139 if (fd->collapse == 1
6140 && TYPE_UNSIGNED (type)
6141 && (t == NULL_TREE || !integer_onep (t)))
6142 {
6143 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
6144 n1 = force_gimple_operand_gsi (&si, n1, true, NULL_TREE,
6145 true, GSI_SAME_STMT);
6146 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
6147 n2 = force_gimple_operand_gsi (&si, n2, true, NULL_TREE,
6148 true, GSI_SAME_STMT);
6149 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
6150 NULL_TREE, NULL_TREE);
6151 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6152 if (walk_tree (gimple_cond_lhs_ptr (stmt),
6153 expand_omp_regimplify_p, NULL, NULL)
6154 || walk_tree (gimple_cond_rhs_ptr (stmt),
6155 expand_omp_regimplify_p, NULL, NULL))
6156 {
6157 si = gsi_for_stmt (stmt);
6158 gimple_regimplify_operands (stmt, &si);
6159 }
6160 se = split_block (entry_bb, stmt);
6161 se->flags = EDGE_TRUE_VALUE;
6162 entry_bb = se->dest;
6163 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
6164 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
6165 se->probability = REG_BR_PROB_BASE / 2000 - 1;
6166 if (gimple_in_ssa_p (cfun))
6167 {
6168 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
6169 for (si = gsi_start_phis (fin_bb);
6170 !gsi_end_p (si); gsi_next (&si))
6171 {
6172 gimple phi = gsi_stmt (si);
6173 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
6174 se, UNKNOWN_LOCATION);
6175 }
6176 }
6177 si = gsi_last_bb (entry_bb);
6178 }
6179
6180 t = build_call_expr (builtin_decl_explicit (get_num_threads), 0);
6181 t = fold_convert (itype, t);
6182 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6183 true, GSI_SAME_STMT);
6184
6185 t = build_call_expr (builtin_decl_explicit (get_thread_num), 0);
6186 t = fold_convert (itype, t);
6187 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6188 true, GSI_SAME_STMT);
6189
6190 n1 = fd->loop.n1;
6191 n2 = fd->loop.n2;
6192 step = fd->loop.step;
6193 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6194 {
6195 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6196 OMP_CLAUSE__LOOPTEMP_);
6197 gcc_assert (innerc);
6198 n1 = OMP_CLAUSE_DECL (innerc);
6199 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6200 OMP_CLAUSE__LOOPTEMP_);
6201 gcc_assert (innerc);
6202 n2 = OMP_CLAUSE_DECL (innerc);
6203 }
6204 n1 = force_gimple_operand_gsi (&si, fold_convert (type, n1),
6205 true, NULL_TREE, true, GSI_SAME_STMT);
6206 n2 = force_gimple_operand_gsi (&si, fold_convert (itype, n2),
6207 true, NULL_TREE, true, GSI_SAME_STMT);
6208 step = force_gimple_operand_gsi (&si, fold_convert (itype, step),
6209 true, NULL_TREE, true, GSI_SAME_STMT);
6210 fd->chunk_size
6211 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
6212 true, NULL_TREE, true, GSI_SAME_STMT);
6213
6214 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
6215 t = fold_build2 (PLUS_EXPR, itype, step, t);
6216 t = fold_build2 (PLUS_EXPR, itype, t, n2);
6217 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, n1));
6218 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
6219 t = fold_build2 (TRUNC_DIV_EXPR, itype,
6220 fold_build1 (NEGATE_EXPR, itype, t),
6221 fold_build1 (NEGATE_EXPR, itype, step));
6222 else
6223 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, step);
6224 t = fold_convert (itype, t);
6225 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6226 true, GSI_SAME_STMT);
6227
6228 trip_var = create_tmp_reg (itype, ".trip");
6229 if (gimple_in_ssa_p (cfun))
6230 {
6231 trip_init = make_ssa_name (trip_var, NULL);
6232 trip_main = make_ssa_name (trip_var, NULL);
6233 trip_back = make_ssa_name (trip_var, NULL);
6234 }
6235 else
6236 {
6237 trip_init = trip_var;
6238 trip_main = trip_var;
6239 trip_back = trip_var;
6240 }
6241
6242 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
6243 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6244
6245 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
6246 t = fold_build2 (MULT_EXPR, itype, t, step);
6247 if (POINTER_TYPE_P (type))
6248 t = fold_build_pointer_plus (n1, t);
6249 else
6250 t = fold_build2 (PLUS_EXPR, type, t, n1);
6251 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6252 true, GSI_SAME_STMT);
6253
6254 /* Remove the GIMPLE_OMP_FOR. */
6255 gsi_remove (&si, true);
6256
6257 /* Iteration space partitioning goes in ITER_PART_BB. */
6258 si = gsi_last_bb (iter_part_bb);
6259
6260 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
6261 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
6262 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
6263 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6264 false, GSI_CONTINUE_LINKING);
6265
6266 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
6267 t = fold_build2 (MIN_EXPR, itype, t, n);
6268 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6269 false, GSI_CONTINUE_LINKING);
6270
6271 t = build2 (LT_EXPR, boolean_type_node, s0, n);
6272 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
6273
6274 /* Setup code for sequential iteration goes in SEQ_START_BB. */
6275 si = gsi_start_bb (seq_start_bb);
6276
6277 tree startvar = fd->loop.v;
6278 tree endvar = NULL_TREE;
6279
6280 if (gimple_omp_for_combined_p (fd->for_stmt))
6281 {
6282 tree clauses = gimple_code (inner_stmt) == GIMPLE_OMP_PARALLEL
6283 ? gimple_omp_parallel_clauses (inner_stmt)
6284 : gimple_omp_for_clauses (inner_stmt);
6285 tree innerc = find_omp_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
6286 gcc_assert (innerc);
6287 startvar = OMP_CLAUSE_DECL (innerc);
6288 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6289 OMP_CLAUSE__LOOPTEMP_);
6290 gcc_assert (innerc);
6291 endvar = OMP_CLAUSE_DECL (innerc);
6292 }
6293
6294 t = fold_convert (itype, s0);
6295 t = fold_build2 (MULT_EXPR, itype, t, step);
6296 if (POINTER_TYPE_P (type))
6297 t = fold_build_pointer_plus (n1, t);
6298 else
6299 t = fold_build2 (PLUS_EXPR, type, t, n1);
6300 t = fold_convert (TREE_TYPE (startvar), t);
6301 t = force_gimple_operand_gsi (&si, t,
6302 DECL_P (startvar)
6303 && TREE_ADDRESSABLE (startvar),
6304 NULL_TREE, false, GSI_CONTINUE_LINKING);
6305 stmt = gimple_build_assign (startvar, t);
6306 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
6307
6308 t = fold_convert (itype, e0);
6309 t = fold_build2 (MULT_EXPR, itype, t, step);
6310 if (POINTER_TYPE_P (type))
6311 t = fold_build_pointer_plus (n1, t);
6312 else
6313 t = fold_build2 (PLUS_EXPR, type, t, n1);
6314 t = fold_convert (TREE_TYPE (startvar), t);
6315 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6316 false, GSI_CONTINUE_LINKING);
6317 if (endvar)
6318 {
6319 stmt = gimple_build_assign (endvar, e);
6320 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
6321 }
6322 if (fd->collapse > 1)
6323 expand_omp_for_init_vars (fd, &si, counts, inner_stmt, startvar);
6324
6325 if (!broken_loop)
6326 {
6327 /* The code controlling the sequential loop goes in CONT_BB,
6328 replacing the GIMPLE_OMP_CONTINUE. */
6329 si = gsi_last_bb (cont_bb);
6330 stmt = gsi_stmt (si);
6331 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6332 v_main = gimple_omp_continue_control_use (stmt);
6333 v_back = gimple_omp_continue_control_def (stmt);
6334
6335 if (!gimple_omp_for_combined_p (fd->for_stmt))
6336 {
6337 if (POINTER_TYPE_P (type))
6338 t = fold_build_pointer_plus (v_main, step);
6339 else
6340 t = fold_build2 (PLUS_EXPR, type, v_main, step);
6341 if (DECL_P (v_back) && TREE_ADDRESSABLE (v_back))
6342 t = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
6343 true, GSI_SAME_STMT);
6344 stmt = gimple_build_assign (v_back, t);
6345 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6346
6347 t = build2 (fd->loop.cond_code, boolean_type_node,
6348 DECL_P (v_back) && TREE_ADDRESSABLE (v_back)
6349 ? t : v_back, e);
6350 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
6351 }
6352
6353 /* Remove GIMPLE_OMP_CONTINUE. */
6354 gsi_remove (&si, true);
6355
6356 if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
6357 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, body_bb);
6358
6359 /* Trip update code goes into TRIP_UPDATE_BB. */
6360 si = gsi_start_bb (trip_update_bb);
6361
6362 t = build_int_cst (itype, 1);
6363 t = build2 (PLUS_EXPR, itype, trip_main, t);
6364 stmt = gimple_build_assign (trip_back, t);
6365 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
6366 }
6367
6368 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
6369 si = gsi_last_bb (exit_bb);
6370 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
6371 {
6372 t = gimple_omp_return_lhs (gsi_stmt (si));
6373 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
6374 }
6375 gsi_remove (&si, true);
6376
6377 /* Connect the new blocks. */
6378 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
6379 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
6380
6381 if (!broken_loop)
6382 {
6383 se = find_edge (cont_bb, body_bb);
6384 if (gimple_omp_for_combined_p (fd->for_stmt))
6385 {
6386 remove_edge (se);
6387 se = NULL;
6388 }
6389 else if (fd->collapse > 1)
6390 {
6391 remove_edge (se);
6392 se = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
6393 }
6394 else
6395 se->flags = EDGE_TRUE_VALUE;
6396 find_edge (cont_bb, trip_update_bb)->flags
6397 = se ? EDGE_FALSE_VALUE : EDGE_FALLTHRU;
6398
6399 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
6400 }
6401
6402 if (gimple_in_ssa_p (cfun))
6403 {
6404 gimple_stmt_iterator psi;
6405 gimple phi;
6406 edge re, ene;
6407 edge_var_map_vector *head;
6408 edge_var_map *vm;
6409 size_t i;
6410
6411 gcc_assert (fd->collapse == 1 && !broken_loop);
6412
6413 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
6414 remove arguments of the phi nodes in fin_bb. We need to create
6415 appropriate phi nodes in iter_part_bb instead. */
6416 se = single_pred_edge (fin_bb);
6417 re = single_succ_edge (trip_update_bb);
6418 head = redirect_edge_var_map_vector (re);
6419 ene = single_succ_edge (entry_bb);
6420
6421 psi = gsi_start_phis (fin_bb);
6422 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
6423 gsi_next (&psi), ++i)
6424 {
6425 gimple nphi;
6426 source_location locus;
6427
6428 phi = gsi_stmt (psi);
6429 t = gimple_phi_result (phi);
6430 gcc_assert (t == redirect_edge_var_map_result (vm));
6431 nphi = create_phi_node (t, iter_part_bb);
6432
6433 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
6434 locus = gimple_phi_arg_location_from_edge (phi, se);
6435
6436 /* A special case -- fd->loop.v is not yet computed in
6437 iter_part_bb; we need to use v_extra instead. */
6438 if (t == fd->loop.v)
6439 t = v_extra;
6440 add_phi_arg (nphi, t, ene, locus);
6441 locus = redirect_edge_var_map_location (vm);
6442 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
6443 }
6444 gcc_assert (gsi_end_p (psi) && i == head->length ());
6445 redirect_edge_var_map_clear (re);
6446 while (1)
6447 {
6448 psi = gsi_start_phis (fin_bb);
6449 if (gsi_end_p (psi))
6450 break;
6451 remove_phi_node (&psi, false);
6452 }
6453
6454 /* Make phi node for trip. */
6455 phi = create_phi_node (trip_main, iter_part_bb);
6456 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
6457 UNKNOWN_LOCATION);
6458 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
6459 UNKNOWN_LOCATION);
6460 }
6461
6462 if (!broken_loop)
6463 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
6464 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
6465 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
6466 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
6467 recompute_dominator (CDI_DOMINATORS, fin_bb));
6468 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
6469 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
6470 set_immediate_dominator (CDI_DOMINATORS, body_bb,
6471 recompute_dominator (CDI_DOMINATORS, body_bb));
6472
6473 if (!broken_loop)
6474 {
6475 struct loop *trip_loop = alloc_loop ();
6476 trip_loop->header = iter_part_bb;
6477 trip_loop->latch = trip_update_bb;
6478 add_loop (trip_loop, iter_part_bb->loop_father);
6479
6480 if (!gimple_omp_for_combined_p (fd->for_stmt))
6481 {
6482 struct loop *loop = alloc_loop ();
6483 loop->header = body_bb;
6484 loop->latch = cont_bb;
6485 add_loop (loop, trip_loop);
6486 }
6487 }
6488 }
6489
6490
6491 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
6492 loop. Given parameters:
6493
6494 for (V = N1; V cond N2; V += STEP) BODY;
6495
6496 where COND is "<" or ">", we generate pseudocode
6497
6498 V = N1;
6499 goto L1;
6500 L0:
6501 BODY;
6502 V += STEP;
6503 L1:
6504 if (V cond N2) goto L0; else goto L2;
6505 L2:
6506
6507 For collapsed loops, given parameters:
6508 collapse(3)
6509 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
6510 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
6511 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
6512 BODY;
6513
6514 we generate pseudocode
6515
6516 if (cond3 is <)
6517 adj = STEP3 - 1;
6518 else
6519 adj = STEP3 + 1;
6520 count3 = (adj + N32 - N31) / STEP3;
6521 if (cond2 is <)
6522 adj = STEP2 - 1;
6523 else
6524 adj = STEP2 + 1;
6525 count2 = (adj + N22 - N21) / STEP2;
6526 if (cond1 is <)
6527 adj = STEP1 - 1;
6528 else
6529 adj = STEP1 + 1;
6530 count1 = (adj + N12 - N11) / STEP1;
6531 count = count1 * count2 * count3;
6532 V = 0;
6533 V1 = N11;
6534 V2 = N21;
6535 V3 = N31;
6536 goto L1;
6537 L0:
6538 BODY;
6539 V += 1;
6540 V3 += STEP3;
6541 V2 += (V3 cond3 N32) ? 0 : STEP2;
6542 V3 = (V3 cond3 N32) ? V3 : N31;
6543 V1 += (V2 cond2 N22) ? 0 : STEP1;
6544 V2 = (V2 cond2 N22) ? V2 : N21;
6545 L1:
6546 if (V < count) goto L0; else goto L2;
6547 L2:
6548
6549 */
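/* A small worked example for the collapsed case above (illustrative only):
   with collapse(2), two iterations of the outer loop and three of the
   inner one, count = 2 * 3 = 6 and the single linear counter V runs
   0..5 while (V1, V2) steps through (N11, N21), (N11, N21+STEP2),
   (N11, N21+2*STEP2), (N11+STEP1, N21), ... -- i.e. the innermost index
   varies fastest, matching the original loop nest order.  */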
6550
6551 static void
6552 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
6553 {
6554 tree type, t;
6555 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
6556 gimple_stmt_iterator gsi;
6557 gimple stmt;
6558 bool broken_loop = region->cont == NULL;
6559 edge e, ne;
6560 tree *counts = NULL;
6561 int i;
6562 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6563 OMP_CLAUSE_SAFELEN);
6564 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6565 OMP_CLAUSE__SIMDUID_);
6566 tree n1, n2;
6567
6568 type = TREE_TYPE (fd->loop.v);
6569 entry_bb = region->entry;
6570 cont_bb = region->cont;
6571 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
6572 gcc_assert (broken_loop
6573 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
6574 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
6575 if (!broken_loop)
6576 {
6577 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
6578 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
6579 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
6580 l2_bb = BRANCH_EDGE (entry_bb)->dest;
6581 }
6582 else
6583 {
6584 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
6585 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
6586 l2_bb = single_succ (l1_bb);
6587 }
6588 exit_bb = region->exit;
6589 l2_dom_bb = NULL;
6590
6591 gsi = gsi_last_bb (entry_bb);
6592
6593 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
6594 /* Not needed in SSA form right now. */
6595 gcc_assert (!gimple_in_ssa_p (cfun));
6596 if (fd->collapse > 1)
6597 {
6598 int first_zero_iter = -1;
6599 basic_block zero_iter_bb = l2_bb;
6600
6601 counts = XALLOCAVEC (tree, fd->collapse);
6602 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
6603 zero_iter_bb, first_zero_iter,
6604 l2_dom_bb);
6605 }
6606 if (l2_dom_bb == NULL)
6607 l2_dom_bb = l1_bb;
6608
6609 n1 = fd->loop.n1;
6610 n2 = fd->loop.n2;
6611 if (gimple_omp_for_combined_into_p (fd->for_stmt))
6612 {
6613 tree innerc = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
6614 OMP_CLAUSE__LOOPTEMP_);
6615 gcc_assert (innerc);
6616 n1 = OMP_CLAUSE_DECL (innerc);
6617 innerc = find_omp_clause (OMP_CLAUSE_CHAIN (innerc),
6618 OMP_CLAUSE__LOOPTEMP_);
6619 gcc_assert (innerc);
6620 n2 = OMP_CLAUSE_DECL (innerc);
6621 expand_omp_build_assign (&gsi, fd->loop.v,
6622 fold_convert (type, n1));
6623 if (fd->collapse > 1)
6624 {
6625 gsi_prev (&gsi);
6626 expand_omp_for_init_vars (fd, &gsi, counts, NULL, n1);
6627 gsi_next (&gsi);
6628 }
6629 }
6630 else
6631 {
6632 expand_omp_build_assign (&gsi, fd->loop.v,
6633 fold_convert (type, fd->loop.n1));
6634 if (fd->collapse > 1)
6635 for (i = 0; i < fd->collapse; i++)
6636 {
6637 tree itype = TREE_TYPE (fd->loops[i].v);
6638 if (POINTER_TYPE_P (itype))
6639 itype = signed_type_for (itype);
6640 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
6641 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
6642 }
6643 }
6644
6645 /* Remove the GIMPLE_OMP_FOR statement. */
6646 gsi_remove (&gsi, true);
6647
6648 if (!broken_loop)
6649 {
6650 /* Code to control the increment goes in the CONT_BB. */
6651 gsi = gsi_last_bb (cont_bb);
6652 stmt = gsi_stmt (gsi);
6653 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
6654
6655 if (POINTER_TYPE_P (type))
6656 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
6657 else
6658 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
6659 expand_omp_build_assign (&gsi, fd->loop.v, t);
6660
6661 if (fd->collapse > 1)
6662 {
6663 i = fd->collapse - 1;
6664 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
6665 {
6666 t = fold_convert (sizetype, fd->loops[i].step);
6667 t = fold_build_pointer_plus (fd->loops[i].v, t);
6668 }
6669 else
6670 {
6671 t = fold_convert (TREE_TYPE (fd->loops[i].v),
6672 fd->loops[i].step);
6673 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
6674 fd->loops[i].v, t);
6675 }
6676 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
6677
6678 for (i = fd->collapse - 1; i > 0; i--)
6679 {
6680 tree itype = TREE_TYPE (fd->loops[i].v);
6681 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
6682 if (POINTER_TYPE_P (itype2))
6683 itype2 = signed_type_for (itype2);
6684 t = build3 (COND_EXPR, itype2,
6685 build2 (fd->loops[i].cond_code, boolean_type_node,
6686 fd->loops[i].v,
6687 fold_convert (itype, fd->loops[i].n2)),
6688 build_int_cst (itype2, 0),
6689 fold_convert (itype2, fd->loops[i - 1].step));
6690 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
6691 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
6692 else
6693 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
6694 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
6695
6696 t = build3 (COND_EXPR, itype,
6697 build2 (fd->loops[i].cond_code, boolean_type_node,
6698 fd->loops[i].v,
6699 fold_convert (itype, fd->loops[i].n2)),
6700 fd->loops[i].v,
6701 fold_convert (itype, fd->loops[i].n1));
6702 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
6703 }
6704 }
6705
6706 /* Remove GIMPLE_OMP_CONTINUE. */
6707 gsi_remove (&gsi, true);
6708 }
6709
6710 /* Emit the condition in L1_BB. */
6711 gsi = gsi_start_bb (l1_bb);
6712
6713 t = fold_convert (type, n2);
6714 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
6715 false, GSI_CONTINUE_LINKING);
6716 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
6717 stmt = gimple_build_cond_empty (t);
6718 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
6719 if (walk_tree (gimple_cond_lhs_ptr (stmt), expand_omp_regimplify_p,
6720 NULL, NULL)
6721 || walk_tree (gimple_cond_rhs_ptr (stmt), expand_omp_regimplify_p,
6722 NULL, NULL))
6723 {
6724 gsi = gsi_for_stmt (stmt);
6725 gimple_regimplify_operands (stmt, &gsi);
6726 }
6727
6728 /* Remove GIMPLE_OMP_RETURN. */
6729 gsi = gsi_last_bb (exit_bb);
6730 gsi_remove (&gsi, true);
6731
6732 /* Connect the new blocks. */
6733 remove_edge (FALLTHRU_EDGE (entry_bb));
6734
6735 if (!broken_loop)
6736 {
6737 remove_edge (BRANCH_EDGE (entry_bb));
6738 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
6739
6740 e = BRANCH_EDGE (l1_bb);
6741 ne = FALLTHRU_EDGE (l1_bb);
6742 e->flags = EDGE_TRUE_VALUE;
6743 }
6744 else
6745 {
6746 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6747
6748 ne = single_succ_edge (l1_bb);
6749 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
6750
6751 }
6752 ne->flags = EDGE_FALSE_VALUE;
6753 e->probability = REG_BR_PROB_BASE * 7 / 8;
6754 ne->probability = REG_BR_PROB_BASE / 8;
6755
6756 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
6757 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
6758 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
6759
6760 if (!broken_loop)
6761 {
6762 struct loop *loop = alloc_loop ();
6763 loop->header = l1_bb;
6764 loop->latch = e->dest;
6765 add_loop (loop, l1_bb->loop_father);
6766 if (safelen == NULL_TREE)
6767 loop->safelen = INT_MAX;
6768 else
6769 {
6770 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
6771 if (!host_integerp (safelen, 1)
6772 || (unsigned HOST_WIDE_INT) tree_low_cst (safelen, 1)
6773 > INT_MAX)
6774 loop->safelen = INT_MAX;
6775 else
6776 loop->safelen = tree_low_cst (safelen, 1);
6777 if (loop->safelen == 1)
6778 loop->safelen = 0;
6779 }
6780 if (simduid)
6781 {
6782 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
6783 cfun->has_simduid_loops = true;
6784 }
6785 /* If -ftree-loop-vectorize is enabled, or neither it nor -ftree-vectorize
6786 was set explicitly on the command line, hint that we want to vectorize
the loop. */
6787 if ((flag_tree_loop_vectorize
6788 || (!global_options_set.x_flag_tree_loop_vectorize
6789 && !global_options_set.x_flag_tree_vectorize))
6790 && loop->safelen > 1)
6791 {
6792 loop->force_vect = true;
6793 cfun->has_force_vect_loops = true;
6794 }
6795 }
6796 }
6797
6798
6799 /* Expand the OpenMP loop defined by REGION. */
6800
6801 static void
6802 expand_omp_for (struct omp_region *region, gimple inner_stmt)
6803 {
6804 struct omp_for_data fd;
6805 struct omp_for_data_loop *loops;
6806
6807 loops
6808 = (struct omp_for_data_loop *)
6809 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
6810 * sizeof (struct omp_for_data_loop));
6811 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
6812 region->sched_kind = fd.sched_kind;
6813
6814 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
6815 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
6816 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
6817 if (region->cont)
6818 {
6819 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
6820 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
6821 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
6822 }
6823 else
6824 /* If there isn't a continue, then this is a degenerate case where
6825 the introduction of abnormal edges during lowering will prevent
6826 original loops from being detected. Fix that up. */
6827 loops_state_set (LOOPS_NEED_FIXUP);
6828
6829 if (gimple_omp_for_kind (fd.for_stmt) & GF_OMP_FOR_KIND_SIMD)
6830 expand_omp_simd (region, &fd);
6831 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
6832 && !fd.have_ordered)
6833 {
6834 if (fd.chunk_size == NULL)
6835 expand_omp_for_static_nochunk (region, &fd, inner_stmt);
6836 else
6837 expand_omp_for_static_chunk (region, &fd, inner_stmt);
6838 }
6839 else
6840 {
6841 int fn_index, start_ix, next_ix;
6842
6843 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
6844 == GF_OMP_FOR_KIND_FOR);
6845 if (fd.chunk_size == NULL
6846 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
6847 fd.chunk_size = integer_zero_node;
6848 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
6849 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
6850 ? 3 : fd.sched_kind;
6851 fn_index += fd.have_ordered * 4;
6852 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
6853 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
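/* For example (assuming the builtin enum lists the schedules in the same
   static/dynamic/guided/runtime order used for fn_index above),
   schedule(dynamic, 4) on a long iteration space gives fn_index = 1 and
   thus selects GOMP_loop_dynamic_start / GOMP_loop_dynamic_next, while
   the have_ordered * 4 adjustment presumably moves on to the
   corresponding *_ordered_* entry points.  */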
6854 if (fd.iter_type == long_long_unsigned_type_node)
6855 {
6856 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
6857 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
6858 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
6859 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
6860 }
6861 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
6862 (enum built_in_function) next_ix, inner_stmt);
6863 }
6864
6865 if (gimple_in_ssa_p (cfun))
6866 update_ssa (TODO_update_ssa_only_virtuals);
6867 }
6868
6869
6870 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
6871
6872 v = GOMP_sections_start (n);
6873 L0:
6874 switch (v)
6875 {
6876 case 0:
6877 goto L2;
6878 case 1:
6879 section 1;
6880 goto L1;
6881 case 2:
6882 ...
6883 case n:
6884 ...
6885 default:
6886 abort ();
6887 }
6888 L1:
6889 v = GOMP_sections_next ();
6890 goto L0;
6891 L2:
6892 reduction;
6893
6894 If this is a combined parallel sections region, replace the call to
6895 GOMP_sections_start with a call to GOMP_sections_next. */
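/* For instance (an illustration, not a verbatim GIMPLE dump), a directive
   with two sections expands to roughly

     v = GOMP_sections_start (2);
   L0:
     switch (v)
       {
       case 0: goto L2;
       case 1: <section 1 body>; goto L1;
       case 2: <section 2 body>; goto L1;
       default: __builtin_trap ();
       }
   L1:
     v = GOMP_sections_next ();
     goto L0;
   L2:
     ;

   so each thread keeps asking the runtime for section numbers until it is
   handed 0.  */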
6896
6897 static void
6898 expand_omp_sections (struct omp_region *region)
6899 {
6900 tree t, u, vin = NULL, vmain, vnext, l2;
6901 vec<tree> label_vec;
6902 unsigned len;
6903 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
6904 gimple_stmt_iterator si, switch_si;
6905 gimple sections_stmt, stmt, cont;
6906 edge_iterator ei;
6907 edge e;
6908 struct omp_region *inner;
6909 unsigned i, casei;
6910 bool exit_reachable = region->cont != NULL;
6911
6912 gcc_assert (region->exit != NULL);
6913 entry_bb = region->entry;
6914 l0_bb = single_succ (entry_bb);
6915 l1_bb = region->cont;
6916 l2_bb = region->exit;
6917 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
6918 l2 = gimple_block_label (l2_bb);
6919 else
6920 {
6921 /* This can happen if there are reductions. */
6922 len = EDGE_COUNT (l0_bb->succs);
6923 gcc_assert (len > 0);
6924 e = EDGE_SUCC (l0_bb, len - 1);
6925 si = gsi_last_bb (e->dest);
6926 l2 = NULL_TREE;
6927 if (gsi_end_p (si)
6928 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
6929 l2 = gimple_block_label (e->dest);
6930 else
6931 FOR_EACH_EDGE (e, ei, l0_bb->succs)
6932 {
6933 si = gsi_last_bb (e->dest);
6934 if (gsi_end_p (si)
6935 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
6936 {
6937 l2 = gimple_block_label (e->dest);
6938 break;
6939 }
6940 }
6941 }
6942 if (exit_reachable)
6943 default_bb = create_empty_bb (l1_bb->prev_bb);
6944 else
6945 default_bb = create_empty_bb (l0_bb);
6946
6947 /* We will build a switch() with enough cases for all the
6948 GIMPLE_OMP_SECTION regions, a '0' case to signal that no more work
6949 remains, and a default case to abort if something goes wrong. */
6950 len = EDGE_COUNT (l0_bb->succs);
6951
6952 /* Use vec::quick_push on label_vec throughout, since we know the size
6953 in advance. */
6954 label_vec.create (len);
6955
6956 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
6957 GIMPLE_OMP_SECTIONS statement. */
6958 si = gsi_last_bb (entry_bb);
6959 sections_stmt = gsi_stmt (si);
6960 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
6961 vin = gimple_omp_sections_control (sections_stmt);
6962 if (!is_combined_parallel (region))
6963 {
6964 /* If we are not inside a combined parallel+sections region,
6965 call GOMP_sections_start. */
6966 t = build_int_cst (unsigned_type_node, len - 1);
6967 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
6968 stmt = gimple_build_call (u, 1, t);
6969 }
6970 else
6971 {
6972 /* Otherwise, call GOMP_sections_next. */
6973 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
6974 stmt = gimple_build_call (u, 0);
6975 }
6976 gimple_call_set_lhs (stmt, vin);
6977 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
6978 gsi_remove (&si, true);
6979
6980 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
6981 L0_BB. */
6982 switch_si = gsi_last_bb (l0_bb);
6983 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
6984 if (exit_reachable)
6985 {
6986 cont = last_stmt (l1_bb);
6987 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
6988 vmain = gimple_omp_continue_control_use (cont);
6989 vnext = gimple_omp_continue_control_def (cont);
6990 }
6991 else
6992 {
6993 vmain = vin;
6994 vnext = NULL_TREE;
6995 }
6996
6997 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
6998 label_vec.quick_push (t);
6999 i = 1;
7000
7001 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
7002 for (inner = region->inner, casei = 1;
7003 inner;
7004 inner = inner->next, i++, casei++)
7005 {
7006 basic_block s_entry_bb, s_exit_bb;
7007
7008 /* Skip optional reduction region. */
7009 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
7010 {
7011 --i;
7012 --casei;
7013 continue;
7014 }
7015
7016 s_entry_bb = inner->entry;
7017 s_exit_bb = inner->exit;
7018
7019 t = gimple_block_label (s_entry_bb);
7020 u = build_int_cst (unsigned_type_node, casei);
7021 u = build_case_label (u, NULL, t);
7022 label_vec.quick_push (u);
7023
7024 si = gsi_last_bb (s_entry_bb);
7025 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
7026 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
7027 gsi_remove (&si, true);
7028 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
7029
7030 if (s_exit_bb == NULL)
7031 continue;
7032
7033 si = gsi_last_bb (s_exit_bb);
7034 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7035 gsi_remove (&si, true);
7036
7037 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
7038 }
7039
7040 /* Error handling code goes in DEFAULT_BB. */
7041 t = gimple_block_label (default_bb);
7042 u = build_case_label (NULL, NULL, t);
7043 make_edge (l0_bb, default_bb, 0);
7044 if (current_loops)
7045 add_bb_to_loop (default_bb, current_loops->tree_root);
7046
7047 stmt = gimple_build_switch (vmain, u, label_vec);
7048 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
7049 gsi_remove (&switch_si, true);
7050 label_vec.release ();
7051
7052 si = gsi_start_bb (default_bb);
7053 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
7054 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
7055
7056 if (exit_reachable)
7057 {
7058 tree bfn_decl;
7059
7060 /* Code to get the next section goes in L1_BB. */
7061 si = gsi_last_bb (l1_bb);
7062 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
7063
7064 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
7065 stmt = gimple_build_call (bfn_decl, 0);
7066 gimple_call_set_lhs (stmt, vnext);
7067 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7068 gsi_remove (&si, true);
7069
7070 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
7071 }
7072
7073 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
7074 si = gsi_last_bb (l2_bb);
7075 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
7076 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
7077 else if (gimple_omp_return_lhs (gsi_stmt (si)))
7078 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_CANCEL);
7079 else
7080 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
7081 stmt = gimple_build_call (t, 0);
7082 if (gimple_omp_return_lhs (gsi_stmt (si)))
7083 gimple_call_set_lhs (stmt, gimple_omp_return_lhs (gsi_stmt (si)));
7084 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
7085 gsi_remove (&si, true);
7086
7087 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
7088 }
7089
7090
7091 /* Expand code for an OpenMP single directive. We've already expanded
7092 much of the code; here we simply place the GOMP_barrier call. */
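/* E.g. (a sketch): for "#pragma omp single" without a nowait clause, the
   only code left to materialize here is the closing barrier, roughly
     GOMP_barrier ();
   (or GOMP_barrier_cancel () when the return value is needed for
   cancellation); with nowait the GIMPLE_OMP_RETURN is simply deleted.  */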
7093
7094 static void
7095 expand_omp_single (struct omp_region *region)
7096 {
7097 basic_block entry_bb, exit_bb;
7098 gimple_stmt_iterator si;
7099
7100 entry_bb = region->entry;
7101 exit_bb = region->exit;
7102
7103 si = gsi_last_bb (entry_bb);
7104 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
7105 gsi_remove (&si, true);
7106 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7107
7108 si = gsi_last_bb (exit_bb);
7109 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
7110 {
7111 tree t = gimple_omp_return_lhs (gsi_stmt (si));
7112 gsi_insert_after (&si, build_omp_barrier (t), GSI_SAME_STMT);
7113 }
7114 gsi_remove (&si, true);
7115 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7116 }
7117
7118
7119 /* Generic expansion for OpenMP synchronization directives: master,
7120 taskgroup, ordered, critical and teams. All we need to do here is
7121 remove the entry and exit markers for REGION. */
7122
7123 static void
7124 expand_omp_synch (struct omp_region *region)
7125 {
7126 basic_block entry_bb, exit_bb;
7127 gimple_stmt_iterator si;
7128
7129 entry_bb = region->entry;
7130 exit_bb = region->exit;
7131
7132 si = gsi_last_bb (entry_bb);
7133 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
7134 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
7135 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TASKGROUP
7136 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
7137 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL
7138 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_TEAMS);
7139 gsi_remove (&si, true);
7140 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7141
7142 if (exit_bb)
7143 {
7144 si = gsi_last_bb (exit_bb);
7145 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
7146 gsi_remove (&si, true);
7147 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
7148 }
7149 }
7150
7151 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7152 operation as a normal volatile load. */
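/* E.g. (a sketch of the intended lowering, for a 4-byte LOADED_VAL):
     #pragma omp atomic read
     v = x;
   becomes, barring VIEW_CONVERT_EXPRs for type mismatches,
     v = __atomic_load_4 (&x, MEMMODEL_RELAXED);
   with MEMMODEL_SEQ_CST used instead when the seq_cst clause was given.  */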
7153
7154 static bool
7155 expand_omp_atomic_load (basic_block load_bb, tree addr,
7156 tree loaded_val, int index)
7157 {
7158 enum built_in_function tmpbase;
7159 gimple_stmt_iterator gsi;
7160 basic_block store_bb;
7161 location_t loc;
7162 gimple stmt;
7163 tree decl, call, type, itype;
7164
7165 gsi = gsi_last_bb (load_bb);
7166 stmt = gsi_stmt (gsi);
7167 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7168 loc = gimple_location (stmt);
7169
7170 /* ??? If the target does not implement atomic_load_optab[mode], and mode
7171 is smaller than word size, then expand_atomic_load assumes that the load
7172 is atomic. We could avoid the builtin entirely in this case. */
7173
7174 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
7175 decl = builtin_decl_explicit (tmpbase);
7176 if (decl == NULL_TREE)
7177 return false;
7178
7179 type = TREE_TYPE (loaded_val);
7180 itype = TREE_TYPE (TREE_TYPE (decl));
7181
7182 call = build_call_expr_loc (loc, decl, 2, addr,
7183 build_int_cst (NULL,
7184 gimple_omp_atomic_seq_cst_p (stmt)
7185 ? MEMMODEL_SEQ_CST
7186 : MEMMODEL_RELAXED));
7187 if (!useless_type_conversion_p (type, itype))
7188 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7189 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7190
7191 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7192 gsi_remove (&gsi, true);
7193
7194 store_bb = single_succ (load_bb);
7195 gsi = gsi_last_bb (store_bb);
7196 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7197 gsi_remove (&gsi, true);
7198
7199 if (gimple_in_ssa_p (cfun))
7200 update_ssa (TODO_update_ssa_no_phi);
7201
7202 return true;
7203 }
7204
7205 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7206 operation as a normal volatile store. */
7207
7208 static bool
7209 expand_omp_atomic_store (basic_block load_bb, tree addr,
7210 tree loaded_val, tree stored_val, int index)
7211 {
7212 enum built_in_function tmpbase;
7213 gimple_stmt_iterator gsi;
7214 basic_block store_bb = single_succ (load_bb);
7215 location_t loc;
7216 gimple stmt;
7217 tree decl, call, type, itype;
7218 enum machine_mode imode;
7219 bool exchange;
7220
7221 gsi = gsi_last_bb (load_bb);
7222 stmt = gsi_stmt (gsi);
7223 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
7224
7225 /* If the load value is needed, then this isn't a store but an exchange. */
7226 exchange = gimple_omp_atomic_need_value_p (stmt);
7227
7228 gsi = gsi_last_bb (store_bb);
7229 stmt = gsi_stmt (gsi);
7230 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
7231 loc = gimple_location (stmt);
7232
7233 /* ??? If the target does not implement atomic_store_optab[mode], and mode
7234 is smaller than word size, then expand_atomic_store assumes that the store
7235 is atomic. We could avoid the builtin entirely in this case. */
7236
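/* As in expand_omp_atomic_load, "+ index + 1" below selects the
   1-, 2-, 4-, 8- or 16-byte variant of the chosen builtin.  */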
7237 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
7238 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
7239 decl = builtin_decl_explicit (tmpbase);
7240 if (decl == NULL_TREE)
7241 return false;
7242
7243 type = TREE_TYPE (stored_val);
7244
7245 /* Dig out the type of the function's second argument. */
7246 itype = TREE_TYPE (decl);
7247 itype = TYPE_ARG_TYPES (itype);
7248 itype = TREE_CHAIN (itype);
7249 itype = TREE_VALUE (itype);
7250 imode = TYPE_MODE (itype);
7251
7252 if (exchange && !can_atomic_exchange_p (imode, true))
7253 return false;
7254
7255 if (!useless_type_conversion_p (itype, type))
7256 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
7257 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
7258 build_int_cst (NULL,
7259 gimple_omp_atomic_seq_cst_p (stmt)
7260 ? MEMMODEL_SEQ_CST
7261 : MEMMODEL_RELAXED));
7262 if (exchange)
7263 {
7264 if (!useless_type_conversion_p (type, itype))
7265 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
7266 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
7267 }
7268
7269 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7270 gsi_remove (&gsi, true);
7271
7272 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
7273 gsi = gsi_last_bb (load_bb);
7274 gsi_remove (&gsi, true);
7275
7276 if (gimple_in_ssa_p (cfun))
7277 update_ssa (TODO_update_ssa_no_phi);
7278
7279 return true;
7280 }
7281
7282 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
7283 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
7284 size of the data type, and thus usable to find the index of the builtin
7285 decl. Returns false if the expression is not of the proper form. */
7286
7287 static bool
7288 expand_omp_atomic_fetch_op (basic_block load_bb,
7289 tree addr, tree loaded_val,
7290 tree stored_val, int index)
7291 {
7292 enum built_in_function oldbase, newbase, tmpbase;
7293 tree decl, itype, call;
7294 tree lhs, rhs;
7295 basic_block store_bb = single_succ (load_bb);
7296 gimple_stmt_iterator gsi;
7297 gimple stmt;
7298 location_t loc;
7299 enum tree_code code;
7300 bool need_old, need_new;
7301 enum machine_mode imode;
7302 bool seq_cst;
7303
7304 /* We expect to find the following sequences:
7305
7306 load_bb:
7307 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
7308
7309 store_bb:
7310 val = tmp OP something; (or: something OP tmp)
7311 GIMPLE_OMP_ATOMIC_STORE (val)
7312
7313 ???FIXME: Allow a more flexible sequence.
7314 Perhaps use data flow to pick the statements.
7315
7316 */
7317
7318 gsi = gsi_after_labels (store_bb);
7319 stmt = gsi_stmt (gsi);
7320 loc = gimple_location (stmt);
7321 if (!is_gimple_assign (stmt))
7322 return false;
7323 gsi_next (&gsi);
7324 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
7325 return false;
7326 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
7327 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
7328 seq_cst = gimple_omp_atomic_seq_cst_p (last_stmt (load_bb));
7329 gcc_checking_assert (!need_old || !need_new);
7330
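/* The single assignment in STORE_BB must be the statement that computes
   the value consumed by the GIMPLE_OMP_ATOMIC_STORE.  */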
7331 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
7332 return false;
7333
7334 /* Check for one of the supported fetch-op operations. */
7335 code = gimple_assign_rhs_code (stmt);
7336 switch (code)
7337 {
7338 case PLUS_EXPR:
7339 case POINTER_PLUS_EXPR:
7340 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
7341 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
7342 break;
7343 case MINUS_EXPR:
7344 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
7345 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
7346 break;
7347 case BIT_AND_EXPR:
7348 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
7349 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
7350 break;
7351 case BIT_IOR_EXPR:
7352 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
7353 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
7354 break;
7355 case BIT_XOR_EXPR:
7356 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
7357 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
7358 break;
7359 default:
7360 return false;
7361 }
7362
7363 /* Make sure the expression is of the proper form. */
7364 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
7365 rhs = gimple_assign_rhs2 (stmt);
7366 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
7367 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
7368 rhs = gimple_assign_rhs1 (stmt);
7369 else
7370 return false;
7371
7372 tmpbase = ((enum built_in_function)
7373 ((need_new ? newbase : oldbase) + index + 1));
7374 decl = builtin_decl_explicit (tmpbase);
7375 if (decl == NULL_TREE)
7376 return false;
7377 itype = TREE_TYPE (TREE_TYPE (decl));
7378 imode = TYPE_MODE (itype);
7379
7380 /* We could test all of the various optabs involved, but the fact of the
7381 matter is that (with the exception of i486 vs i586 and xadd) all targets
7382 that support any atomic operaton optab also implements compare-and-swap.
7383 Let optabs.c take care of expanding any compare-and-swap loop. */
7384 if (!can_compare_and_swap_p (imode, true))
7385 return false;
7386
7387 gsi = gsi_last_bb (load_bb);
7388 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
7389
7390 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
7391 It only requires that the operation happen atomically. Thus we can
7392 use the RELAXED memory model. */
7393 call = build_call_expr_loc (loc, decl, 3, addr,
7394 fold_convert_loc (loc, itype, rhs),
7395 build_int_cst (NULL,
7396 seq_cst ? MEMMODEL_SEQ_CST
7397 : MEMMODEL_RELAXED));
7398
7399 if (need_old || need_new)
7400 {
7401 lhs = need_old ? loaded_val : stored_val;
7402 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
7403 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
7404 }
7405 else
7406 call = fold_convert_loc (loc, void_type_node, call);
7407 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
7408 gsi_remove (&gsi, true);
7409
7410 gsi = gsi_last_bb (store_bb);
7411 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
7412 gsi_remove (&gsi, true);
7413 gsi = gsi_last_bb (store_bb);
7414 gsi_remove (&gsi, true);
7415
7416 if (gimple_in_ssa_p (cfun))
7417 update_ssa (TODO_update_ssa_no_phi);
7418
7419 return true;
7420 }
7421
7422 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
7423
7424 oldval = *addr;
7425 repeat:
7426 newval = rhs; // with oldval replacing *addr in rhs
7427 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
7428 if (oldval != newval)
7429 goto repeat;
7430
7431 INDEX is log2 of the size of the data type, and thus usable to find the
7432 index of the builtin decl. */
7433
7434 static bool
7435 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
7436 tree addr, tree loaded_val, tree stored_val,
7437 int index)
7438 {
7439 tree loadedi, storedi, initial, new_storedi, old_vali;
7440 tree type, itype, cmpxchg, iaddr;
7441 gimple_stmt_iterator si;
7442 basic_block loop_header = single_succ (load_bb);
7443 gimple phi, stmt;
7444 edge e;
7445 enum built_in_function fncode;
7446
7447 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
7448 order to use the RELAXED memory model effectively. */
7449 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
7450 + index + 1);
7451 cmpxchg = builtin_decl_explicit (fncode);
7452 if (cmpxchg == NULL_TREE)
7453 return false;
7454 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
7455 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
7456
7457 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
7458 return false;
7459
7460 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
7461 si = gsi_last_bb (load_bb);
7462 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
7463
7464 /* For floating-point values, we'll need to view-convert them to integers
7465 so that we can perform the atomic compare and swap. Simplify the
7466 following code by always setting up the "i"ntegral variables. */
7467 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
7468 {
7469 tree iaddr_val;
7470
7471 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
7472 true), NULL);
7473 iaddr_val
7474 = force_gimple_operand_gsi (&si,
7475 fold_convert (TREE_TYPE (iaddr), addr),
7476 false, NULL_TREE, true, GSI_SAME_STMT);
7477 stmt = gimple_build_assign (iaddr, iaddr_val);
7478 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7479 loadedi = create_tmp_var (itype, NULL);
7480 if (gimple_in_ssa_p (cfun))
7481 loadedi = make_ssa_name (loadedi, NULL);
7482 }
7483 else
7484 {
7485 iaddr = addr;
7486 loadedi = loaded_val;
7487 }
7488
7489 initial
7490 = force_gimple_operand_gsi (&si,
7491 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
7492 iaddr,
7493 build_int_cst (TREE_TYPE (iaddr), 0)),
7494 true, NULL_TREE, true, GSI_SAME_STMT);
7495
7496 /* Move the value to the LOADEDI temporary. */
7497 if (gimple_in_ssa_p (cfun))
7498 {
7499 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
7500 phi = create_phi_node (loadedi, loop_header);
7501 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
7502 initial);
7503 }
7504 else
7505 gsi_insert_before (&si,
7506 gimple_build_assign (loadedi, initial),
7507 GSI_SAME_STMT);
7508 if (loadedi != loaded_val)
7509 {
7510 gimple_stmt_iterator gsi2;
7511 tree x;
7512
7513 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
7514 gsi2 = gsi_start_bb (loop_header);
7515 if (gimple_in_ssa_p (cfun))
7516 {
7517 gimple stmt;
7518 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
7519 true, GSI_SAME_STMT);
7520 stmt = gimple_build_assign (loaded_val, x);
7521 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
7522 }
7523 else
7524 {
7525 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
7526 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
7527 true, GSI_SAME_STMT);
7528 }
7529 }
7530 gsi_remove (&si, true);
7531
7532 si = gsi_last_bb (store_bb);
7533 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
7534
7535 if (iaddr == addr)
7536 storedi = stored_val;
7537 else
7538 storedi =
7539 force_gimple_operand_gsi (&si,
7540 build1 (VIEW_CONVERT_EXPR, itype,
7541 stored_val), true, NULL_TREE, true,
7542 GSI_SAME_STMT);
7543
7544 /* Build the compare&swap statement. */
7545 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
7546 new_storedi = force_gimple_operand_gsi (&si,
7547 fold_convert (TREE_TYPE (loadedi),
7548 new_storedi),
7549 true, NULL_TREE,
7550 true, GSI_SAME_STMT);
7551
7552 if (gimple_in_ssa_p (cfun))
7553 old_vali = loadedi;
7554 else
7555 {
7556 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
7557 stmt = gimple_build_assign (old_vali, loadedi);
7558 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7559
7560 stmt = gimple_build_assign (loadedi, new_storedi);
7561 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7562 }
7563
7564 /* Note that we always perform the comparison as an integer, even for
7565 floating point. This allows the atomic operation to properly
7566 succeed even with NaNs and -0.0. */
7567 stmt = gimple_build_cond_empty
7568 (build2 (NE_EXPR, boolean_type_node,
7569 new_storedi, old_vali));
7570 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7571
7572 /* Update cfg. */
7573 e = single_succ_edge (store_bb);
7574 e->flags &= ~EDGE_FALLTHRU;
7575 e->flags |= EDGE_FALSE_VALUE;
7576
7577 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
7578
7579 /* Copy the new value to loadedi (we already did that before the condition
7580 if we are not in SSA). */
7581 if (gimple_in_ssa_p (cfun))
7582 {
7583 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
7584 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
7585 }
7586
7587 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
7588 gsi_remove (&si, true);
7589
7590 struct loop *loop = alloc_loop ();
7591 loop->header = loop_header;
7592 loop->latch = store_bb;
7593 add_loop (loop, loop_header->loop_father);
7594
7595 if (gimple_in_ssa_p (cfun))
7596 update_ssa (TODO_update_ssa_no_phi);
7597
7598 return true;
7599 }
7600
7601 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
7602
7603 GOMP_atomic_start ();
7604 *addr = rhs;
7605 GOMP_atomic_end ();
7606
7607 The result is not globally atomic, but works so long as all parallel
7608 references are within #pragma omp atomic directives. According to
7609 responses received from omp@openmp.org, appears to be within spec.
7610 Which makes sense, since that's how several other compilers handle
7611 this situation as well.
7612 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
7613 expanding. STORED_VAL is the operand of the matching
7614 GIMPLE_OMP_ATOMIC_STORE.
7615
7616 We replace
7617 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
7618 loaded_val = *addr;
7619
7620 and replace
7621 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
7622 *addr = stored_val;
7623 */
7624
7625 static bool
7626 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
7627 tree addr, tree loaded_val, tree stored_val)
7628 {
7629 gimple_stmt_iterator si;
7630 gimple stmt;
7631 tree t;
7632
7633 si = gsi_last_bb (load_bb);
7634 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
7635
7636 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
7637 t = build_call_expr (t, 0);
7638 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
7639
7640 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
7641 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7642 gsi_remove (&si, true);
7643
7644 si = gsi_last_bb (store_bb);
7645 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
7646
7647 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
7648 stored_val);
7649 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
7650
7651 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
7652 t = build_call_expr (t, 0);
7653 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
7654 gsi_remove (&si, true);
7655
7656 if (gimple_in_ssa_p (cfun))
7657 update_ssa (TODO_update_ssa_no_phi);
7658 return true;
7659 }
7660
7661 /* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand
7662 using expand_omp_atomic_fetch_op. If that fails, we try to
7663 call expand_omp_atomic_pipeline, and if that fails too, the
7664 ultimate fallback is wrapping the operation in a mutex
7665 (expand_omp_atomic_mutex). REGION is the atomic region built
7666 by build_omp_regions_1(). */
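/* As an illustration (not an exhaustive description of the strategy),
   for a 4-byte int, "#pragma omp atomic  x += 1;" is normally handled by
   expand_omp_atomic_fetch_op and becomes a call to
   __atomic_fetch_add_4 (&x, 1, MEMMODEL_RELAXED), or MEMMODEL_SEQ_CST
   when seq_cst semantics were requested.  */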
7667
7668 static void
7669 expand_omp_atomic (struct omp_region *region)
7670 {
7671 basic_block load_bb = region->entry, store_bb = region->exit;
7672 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
7673 tree loaded_val = gimple_omp_atomic_load_lhs (load);
7674 tree addr = gimple_omp_atomic_load_rhs (load);
7675 tree stored_val = gimple_omp_atomic_store_val (store);
7676 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
7677 HOST_WIDE_INT index;
7678
7679 /* Make sure the type is one of the supported sizes. */
7680 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
7681 index = exact_log2 (index);
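/* An INDEX of 0 through 4 corresponds to an access size of 1, 2, 4, 8
   or 16 bytes respectively.  */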
7682 if (index >= 0 && index <= 4)
7683 {
7684 unsigned int align = TYPE_ALIGN_UNIT (type);
7685
7686 /* __sync builtins require strict data alignment. */
7687 if (exact_log2 (align) >= index)
7688 {
7689 /* Atomic load. */
7690 if (loaded_val == stored_val
7691 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
7692 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
7693 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
7694 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
7695 return;
7696
7697 /* Atomic store. */
7698 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
7699 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
7700 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
7701 && store_bb == single_succ (load_bb)
7702 && first_stmt (store_bb) == store
7703 && expand_omp_atomic_store (load_bb, addr, loaded_val,
7704 stored_val, index))
7705 return;
7706
7707 /* When possible, use specialized atomic update functions. */
7708 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
7709 && store_bb == single_succ (load_bb)
7710 && expand_omp_atomic_fetch_op (load_bb, addr,
7711 loaded_val, stored_val, index))
7712 return;
7713
7714 /* If we don't have specialized __sync builtins, try and implement
7715 as a compare and swap loop. */
7716 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
7717 loaded_val, stored_val, index))
7718 return;
7719 }
7720 }
7721
7722 /* The ultimate fallback is wrapping the operation in a mutex. */
7723 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
7724 }
7725
7726
7727 /* Expand the OpenMP target{, data, update} directive starting at REGION. */
7728
7729 static void
7730 expand_omp_target (struct omp_region *region)
7731 {
7732 basic_block entry_bb, exit_bb, new_bb;
7733 struct function *child_cfun = NULL;
7734 tree child_fn = NULL_TREE, block, t;
7735 gimple_stmt_iterator gsi;
7736 gimple entry_stmt, stmt;
7737 edge e;
7738
7739 entry_stmt = last_stmt (region->entry);
7740 new_bb = region->entry;
7741 int kind = gimple_omp_target_kind (entry_stmt);
7742 if (kind == GF_OMP_TARGET_KIND_REGION)
7743 {
7744 child_fn = gimple_omp_target_child_fn (entry_stmt);
7745 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
7746 }
7747
7748 entry_bb = region->entry;
7749 exit_bb = region->exit;
7750
7751 if (kind == GF_OMP_TARGET_KIND_REGION)
7752 {
7753 unsigned srcidx, dstidx, num;
7754
7755 /* If the target region needs data sent from the parent
7756 function, then the very first statement (except possible
7757 tree profile counter updates) of the parallel body
7758 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
7759 &.OMP_DATA_O is passed as an argument to the child function,
7760 we need to replace it with the argument as seen by the child
7761 function.
7762
7763 In most cases, this will end up being the identity assignment
7764 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
7765 a function call that has been inlined, the original PARM_DECL
7766 .OMP_DATA_I may have been converted into a different local
7767 variable. In which case, we need to keep the assignment. */
7768 if (gimple_omp_target_data_arg (entry_stmt))
7769 {
7770 basic_block entry_succ_bb = single_succ (entry_bb);
7771 gimple_stmt_iterator gsi;
7772 tree arg;
7773 gimple tgtcopy_stmt = NULL;
7774 tree sender
7775 = TREE_VEC_ELT (gimple_omp_target_data_arg (entry_stmt), 0);
7776
7777 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
7778 {
7779 gcc_assert (!gsi_end_p (gsi));
7780 stmt = gsi_stmt (gsi);
7781 if (gimple_code (stmt) != GIMPLE_ASSIGN)
7782 continue;
7783
7784 if (gimple_num_ops (stmt) == 2)
7785 {
7786 tree arg = gimple_assign_rhs1 (stmt);
7787
7788 /* We're ignoring the subcode because we're
7789 effectively doing a STRIP_NOPS. */
7790
7791 if (TREE_CODE (arg) == ADDR_EXPR
7792 && TREE_OPERAND (arg, 0) == sender)
7793 {
7794 tgtcopy_stmt = stmt;
7795 break;
7796 }
7797 }
7798 }
7799
7800 gcc_assert (tgtcopy_stmt != NULL);
7801 arg = DECL_ARGUMENTS (child_fn);
7802
7803 gcc_assert (gimple_assign_lhs (tgtcopy_stmt) == arg);
7804 gsi_remove (&gsi, true);
7805 }
7806
7807 /* Declare local variables needed in CHILD_CFUN. */
7808 block = DECL_INITIAL (child_fn);
7809 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
7810 /* The gimplifier could record temporaries in the target block
7811 rather than in the containing function's local_decls chain,
7812 which would mean cgraph missed finalizing them. Do it now. */
7813 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
7814 if (TREE_CODE (t) == VAR_DECL
7815 && TREE_STATIC (t)
7816 && !DECL_EXTERNAL (t))
7817 varpool_finalize_decl (t);
7818 DECL_SAVED_TREE (child_fn) = NULL;
7819 /* We'll create a CFG for child_fn, so no gimple body is needed. */
7820 gimple_set_body (child_fn, NULL);
7821 TREE_USED (block) = 1;
7822
7823 /* Reset DECL_CONTEXT on function arguments. */
7824 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
7825 DECL_CONTEXT (t) = child_fn;
7826
7827 /* Split ENTRY_BB at GIMPLE_OMP_TARGET,
7828 so that it can be moved to the child function. */
7829 gsi = gsi_last_bb (entry_bb);
7830 stmt = gsi_stmt (gsi);
7831 gcc_assert (stmt && gimple_code (stmt) == GIMPLE_OMP_TARGET
7832 && gimple_omp_target_kind (stmt)
7833 == GF_OMP_TARGET_KIND_REGION);
7834 gsi_remove (&gsi, true);
7835 e = split_block (entry_bb, stmt);
7836 entry_bb = e->dest;
7837 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
7838
7839 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
7840 if (exit_bb)
7841 {
7842 gsi = gsi_last_bb (exit_bb);
7843 gcc_assert (!gsi_end_p (gsi)
7844 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
7845 stmt = gimple_build_return (NULL);
7846 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
7847 gsi_remove (&gsi, true);
7848 }
7849
7850 /* Move the target region into CHILD_CFUN. */
7851
7852 block = gimple_block (entry_stmt);
7853
7854 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
7855 if (exit_bb)
7856 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
7857 /* When the OMP expansion process cannot guarantee an up-to-date
7858 loop tree, arrange for the child function to fix up its loops. */
7859 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
7860 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
7861
7862 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
7863 num = vec_safe_length (child_cfun->local_decls);
7864 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
7865 {
7866 t = (*child_cfun->local_decls)[srcidx];
7867 if (DECL_CONTEXT (t) == cfun->decl)
7868 continue;
7869 if (srcidx != dstidx)
7870 (*child_cfun->local_decls)[dstidx] = t;
7871 dstidx++;
7872 }
7873 if (dstidx != num)
7874 vec_safe_truncate (child_cfun->local_decls, dstidx);
7875
7876 /* Inform the callgraph about the new function. */
7877 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
7878 cgraph_add_new_function (child_fn, true);
7879
7880 /* Fix the callgraph edges for child_cfun. Those for cfun will be
7881 fixed in a following pass. */
7882 push_cfun (child_cfun);
7883 rebuild_cgraph_edges ();
7884
7885 /* Some EH regions might become dead, see PR34608. If
7886 pass_cleanup_cfg isn't the first pass to happen with the
7887 new child, these dead EH edges might cause problems.
7888 Clean them up now. */
7889 if (flag_exceptions)
7890 {
7891 basic_block bb;
7892 bool changed = false;
7893
7894 FOR_EACH_BB (bb)
7895 changed |= gimple_purge_dead_eh_edges (bb);
7896 if (changed)
7897 cleanup_tree_cfg ();
7898 }
7899 pop_cfun ();
7900 }
7901
7902 /* Emit a library call to launch the target region, or do data
7903 transfers. */
7904 tree t1, t2, t3, t4, device, cond, c, clauses;
7905 enum built_in_function start_ix;
7906 location_t clause_loc;
7907
7908 clauses = gimple_omp_target_clauses (entry_stmt);
7909
7910 if (kind == GF_OMP_TARGET_KIND_REGION)
7911 start_ix = BUILT_IN_GOMP_TARGET;
7912 else if (kind == GF_OMP_TARGET_KIND_DATA)
7913 start_ix = BUILT_IN_GOMP_TARGET_DATA;
7914 else
7915 start_ix = BUILT_IN_GOMP_TARGET_UPDATE;
7916
7917 /* By default, the value of DEVICE is -1 (let runtime library choose)
7918 and there is no conditional. */
7919 cond = NULL_TREE;
7920 device = build_int_cst (integer_type_node, -1);
7921
7922 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
7923 if (c)
7924 cond = OMP_CLAUSE_IF_EXPR (c);
7925
7926 c = find_omp_clause (clauses, OMP_CLAUSE_DEVICE);
7927 if (c)
7928 {
7929 device = OMP_CLAUSE_DEVICE_ID (c);
7930 clause_loc = OMP_CLAUSE_LOCATION (c);
7931 }
7932 else
7933 clause_loc = gimple_location (entry_stmt);
7934
7935 /* Ensure 'device' is of the correct type. */
7936 device = fold_convert_loc (clause_loc, integer_type_node, device);
7937
7938 /* If we found the clause 'if (cond)', build
7939 (cond ? device : -2). */
7940 if (cond)
7941 {
7942 cond = gimple_boolify (cond);
7943
7944 basic_block cond_bb, then_bb, else_bb;
7945 edge e;
7946 tree tmp_var;
7947
7948 tmp_var = create_tmp_var (TREE_TYPE (device), NULL);
7949 if (kind != GF_OMP_TARGET_KIND_REGION)
7950 {
7951 gsi = gsi_last_bb (new_bb);
7952 gsi_prev (&gsi);
7953 e = split_block (new_bb, gsi_stmt (gsi));
7954 }
7955 else
7956 e = split_block (new_bb, NULL);
7957 cond_bb = e->src;
7958 new_bb = e->dest;
7959 remove_edge (e);
7960
7961 then_bb = create_empty_bb (cond_bb);
7962 else_bb = create_empty_bb (then_bb);
7963 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
7964 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
7965
7966 stmt = gimple_build_cond_empty (cond);
7967 gsi = gsi_last_bb (cond_bb);
7968 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7969
7970 gsi = gsi_start_bb (then_bb);
7971 stmt = gimple_build_assign (tmp_var, device);
7972 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7973
7974 gsi = gsi_start_bb (else_bb);
7975 stmt = gimple_build_assign (tmp_var,
7976 build_int_cst (integer_type_node, -2));
7977 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
7978
7979 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
7980 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
7981 if (current_loops)
7982 {
7983 add_bb_to_loop (then_bb, cond_bb->loop_father);
7984 add_bb_to_loop (else_bb, cond_bb->loop_father);
7985 }
7986 make_edge (then_bb, new_bb, EDGE_FALLTHRU);
7987 make_edge (else_bb, new_bb, EDGE_FALLTHRU);
7988
7989 device = tmp_var;
7990 }
7991
7992 gsi = gsi_last_bb (new_bb);
7993 t = gimple_omp_target_data_arg (entry_stmt);
7994 if (t == NULL)
7995 {
7996 t1 = size_zero_node;
7997 t2 = build_zero_cst (ptr_type_node);
7998 t3 = t2;
7999 t4 = t2;
8000 }
8001 else
8002 {
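/* The data argument is a TREE_VEC of three parallel arrays: host
   addresses, sizes and map kinds.  T1 becomes the number of mapped
   entries, i.e. the domain maximum of the sizes array plus one.  */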
8003 t1 = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (TREE_VEC_ELT (t, 1))));
8004 t1 = size_binop (PLUS_EXPR, t1, size_int (1));
8005 t2 = build_fold_addr_expr (TREE_VEC_ELT (t, 0));
8006 t3 = build_fold_addr_expr (TREE_VEC_ELT (t, 1));
8007 t4 = build_fold_addr_expr (TREE_VEC_ELT (t, 2));
8008 }
8009
8010 gimple g;
8011 /* FIXME: This will be the address of the
8012 extern char __OPENMP_TARGET__[] __attribute__((visibility ("hidden")))
8013 symbol, as soon as the linker plugin is able to create it for us. */
8014 tree openmp_target = build_zero_cst (ptr_type_node);
8015 if (kind == GF_OMP_TARGET_KIND_REGION)
8016 {
8017 tree fnaddr = build_fold_addr_expr (child_fn);
8018 g = gimple_build_call (builtin_decl_explicit (start_ix), 7,
8019 device, fnaddr, openmp_target, t1, t2, t3, t4);
8020 }
8021 else
8022 g = gimple_build_call (builtin_decl_explicit (start_ix), 6,
8023 device, openmp_target, t1, t2, t3, t4);
8024 gimple_set_location (g, gimple_location (entry_stmt));
8025 gsi_insert_before (&gsi, g, GSI_SAME_STMT);
8026 if (kind != GF_OMP_TARGET_KIND_REGION)
8027 {
8028 g = gsi_stmt (gsi);
8029 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_TARGET);
8030 gsi_remove (&gsi, true);
8031 }
8032 if (kind == GF_OMP_TARGET_KIND_DATA && region->exit)
8033 {
8034 gsi = gsi_last_bb (region->exit);
8035 g = gsi_stmt (gsi);
8036 gcc_assert (g && gimple_code (g) == GIMPLE_OMP_RETURN);
8037 gsi_remove (&gsi, true);
8038 }
8039 }
8040
8041
8042 /* Expand the parallel region tree rooted at REGION. Expansion
8043 proceeds in depth-first order. Innermost regions are expanded
8044 first. This way, parallel regions that require a new function to
8045 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
8046 internal dependencies in their body. */
8047
8048 static void
8049 expand_omp (struct omp_region *region)
8050 {
8051 while (region)
8052 {
8053 location_t saved_location;
8054 gimple inner_stmt = NULL;
8055
8056 /* First, determine whether this is a combined parallel+workshare
8057 region. */
8058 if (region->type == GIMPLE_OMP_PARALLEL)
8059 determine_parallel_type (region);
8060
8061 if (region->type == GIMPLE_OMP_FOR
8062 && gimple_omp_for_combined_p (last_stmt (region->entry)))
8063 inner_stmt = last_stmt (region->inner->entry);
8064
8065 if (region->inner)
8066 expand_omp (region->inner);
8067
8068 saved_location = input_location;
8069 if (gimple_has_location (last_stmt (region->entry)))
8070 input_location = gimple_location (last_stmt (region->entry));
8071
8072 switch (region->type)
8073 {
8074 case GIMPLE_OMP_PARALLEL:
8075 case GIMPLE_OMP_TASK:
8076 expand_omp_taskreg (region);
8077 break;
8078
8079 case GIMPLE_OMP_FOR:
8080 expand_omp_for (region, inner_stmt);
8081 break;
8082
8083 case GIMPLE_OMP_SECTIONS:
8084 expand_omp_sections (region);
8085 break;
8086
8087 case GIMPLE_OMP_SECTION:
8088 /* Individual omp sections are handled together with their
8089 parent GIMPLE_OMP_SECTIONS region. */
8090 break;
8091
8092 case GIMPLE_OMP_SINGLE:
8093 expand_omp_single (region);
8094 break;
8095
8096 case GIMPLE_OMP_MASTER:
8097 case GIMPLE_OMP_TASKGROUP:
8098 case GIMPLE_OMP_ORDERED:
8099 case GIMPLE_OMP_CRITICAL:
8100 case GIMPLE_OMP_TEAMS:
8101 expand_omp_synch (region);
8102 break;
8103
8104 case GIMPLE_OMP_ATOMIC_LOAD:
8105 expand_omp_atomic (region);
8106 break;
8107
8108 case GIMPLE_OMP_TARGET:
8109 expand_omp_target (region);
8110 break;
8111
8112 default:
8113 gcc_unreachable ();
8114 }
8115
8116 input_location = saved_location;
8117 region = region->next;
8118 }
8119 }
8120
8121
8122 /* Helper for build_omp_regions. Scan the dominator tree starting at
8123 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
8124 true, the function ends once a single tree is built (otherwise, the
8125 whole forest of OMP constructs may be built). */
8126
8127 static void
8128 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
8129 bool single_tree)
8130 {
8131 gimple_stmt_iterator gsi;
8132 gimple stmt;
8133 basic_block son;
8134
8135 gsi = gsi_last_bb (bb);
8136 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
8137 {
8138 struct omp_region *region;
8139 enum gimple_code code;
8140
8141 stmt = gsi_stmt (gsi);
8142 code = gimple_code (stmt);
8143 if (code == GIMPLE_OMP_RETURN)
8144 {
8145 /* STMT is the return point out of region PARENT. Mark it
8146 as the exit point and make PARENT the immediately
8147 enclosing region. */
8148 gcc_assert (parent);
8149 region = parent;
8150 region->exit = bb;
8151 parent = parent->outer;
8152 }
8153 else if (code == GIMPLE_OMP_ATOMIC_STORE)
8154 {
8155 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
8156 GIMPLE_OMP_RETURN, but matches with
8157 GIMPLE_OMP_ATOMIC_LOAD. */
8158 gcc_assert (parent);
8159 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
8160 region = parent;
8161 region->exit = bb;
8162 parent = parent->outer;
8163 }
8164
8165 else if (code == GIMPLE_OMP_CONTINUE)
8166 {
8167 gcc_assert (parent);
8168 parent->cont = bb;
8169 }
8170 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
8171 {
8172 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
8173 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
8174 ;
8175 }
8176 else if (code == GIMPLE_OMP_TARGET
8177 && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_UPDATE)
8178 new_omp_region (bb, code, parent);
8179 else
8180 {
8181 /* Otherwise, this directive becomes the parent for a new
8182 region. */
8183 region = new_omp_region (bb, code, parent);
8184 parent = region;
8185 }
8186 }
8187
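/* In single-tree mode, stop recursing once the requested region (and
   everything nested inside it) has been closed.  */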
8188 if (single_tree && !parent)
8189 return;
8190
8191 for (son = first_dom_son (CDI_DOMINATORS, bb);
8192 son;
8193 son = next_dom_son (CDI_DOMINATORS, son))
8194 build_omp_regions_1 (son, parent, single_tree);
8195 }
8196
8197 /* Builds the tree of OMP regions rooted at ROOT, storing it to
8198 root_omp_region. */
8199
8200 static void
8201 build_omp_regions_root (basic_block root)
8202 {
8203 gcc_assert (root_omp_region == NULL);
8204 build_omp_regions_1 (root, NULL, true);
8205 gcc_assert (root_omp_region != NULL);
8206 }
8207
8208 /* Expand the OMP construct (and its subconstructs) starting in HEAD. */
8209
8210 void
8211 omp_expand_local (basic_block head)
8212 {
8213 build_omp_regions_root (head);
8214 if (dump_file && (dump_flags & TDF_DETAILS))
8215 {
8216 fprintf (dump_file, "\nOMP region tree\n\n");
8217 dump_omp_region (dump_file, root_omp_region, 0);
8218 fprintf (dump_file, "\n");
8219 }
8220
8221 remove_exit_barriers (root_omp_region);
8222 expand_omp (root_omp_region);
8223
8224 free_omp_regions ();
8225 }
8226
8227 /* Scan the CFG and build a tree of OMP regions, storing it in
8228 root_omp_region. */
8229
8230 static void
8231 build_omp_regions (void)
8232 {
8233 gcc_assert (root_omp_region == NULL);
8234 calculate_dominance_info (CDI_DOMINATORS);
8235 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
8236 }
8237
8238 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
8239
8240 static unsigned int
8241 execute_expand_omp (void)
8242 {
8243 build_omp_regions ();
8244
8245 if (!root_omp_region)
8246 return 0;
8247
8248 if (dump_file)
8249 {
8250 fprintf (dump_file, "\nOMP region tree\n\n");
8251 dump_omp_region (dump_file, root_omp_region, 0);
8252 fprintf (dump_file, "\n");
8253 }
8254
8255 remove_exit_barriers (root_omp_region);
8256
8257 expand_omp (root_omp_region);
8258
8259 cleanup_tree_cfg ();
8260
8261 free_omp_regions ();
8262
8263 return 0;
8264 }
8265
8266 /* OMP expansion -- the default pass, run before creation of SSA form. */
8267
8268 static bool
8269 gate_expand_omp (void)
8270 {
8271 return ((flag_openmp != 0 || flag_openmp_simd != 0
8272 || flag_enable_cilkplus != 0) && !seen_error ());
8273 }
8274
8275 namespace {
8276
8277 const pass_data pass_data_expand_omp =
8278 {
8279 GIMPLE_PASS, /* type */
8280 "ompexp", /* name */
8281 OPTGROUP_NONE, /* optinfo_flags */
8282 true, /* has_gate */
8283 true, /* has_execute */
8284 TV_NONE, /* tv_id */
8285 PROP_gimple_any, /* properties_required */
8286 0, /* properties_provided */
8287 0, /* properties_destroyed */
8288 0, /* todo_flags_start */
8289 0, /* todo_flags_finish */
8290 };
8291
8292 class pass_expand_omp : public gimple_opt_pass
8293 {
8294 public:
8295 pass_expand_omp (gcc::context *ctxt)
8296 : gimple_opt_pass (pass_data_expand_omp, ctxt)
8297 {}
8298
8299 /* opt_pass methods: */
8300 bool gate () { return gate_expand_omp (); }
8301 unsigned int execute () { return execute_expand_omp (); }
8302
8303 }; // class pass_expand_omp
8304
8305 } // anon namespace
8306
8307 gimple_opt_pass *
8308 make_pass_expand_omp (gcc::context *ctxt)
8309 {
8310 return new pass_expand_omp (ctxt);
8311 }
8312 \f
8313 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
8314
8315 /* If ctx is a worksharing context inside of a cancellable parallel
8316 region and it isn't nowait, add lhs to its GIMPLE_OMP_RETURN
8317 and conditional branch to parallel's cancel_label to handle
8318 cancellation in the implicit barrier. */
8319
8320 static void
8321 maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple_seq *body)
8322 {
8323 gimple omp_return = gimple_seq_last_stmt (*body);
8324 gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
8325 if (gimple_omp_return_nowait_p (omp_return))
8326 return;
8327 if (ctx->outer
8328 && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_PARALLEL
8329 && ctx->outer->cancellable)
8330 {
8331 tree lhs = create_tmp_var (boolean_type_node, NULL);
8332 gimple_omp_return_set_lhs (omp_return, lhs);
8333 tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
8334 gimple g = gimple_build_cond (NE_EXPR, lhs, boolean_false_node,
8335 ctx->outer->cancel_label, fallthru_label);
8336 gimple_seq_add_stmt (body, g);
8337 gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
8338 }
8339 }
8340
8341 /* Lower the OpenMP sections directive in the current statement in GSI_P.
8342 CTX is the enclosing OMP context for the current statement. */
8343
8344 static void
8345 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8346 {
8347 tree block, control;
8348 gimple_stmt_iterator tgsi;
8349 gimple stmt, new_stmt, bind, t;
8350 gimple_seq ilist, dlist, olist, new_body;
8351 struct gimplify_ctx gctx;
8352
8353 stmt = gsi_stmt (*gsi_p);
8354
8355 push_gimplify_context (&gctx);
8356
8357 dlist = NULL;
8358 ilist = NULL;
8359 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
8360 &ilist, &dlist, ctx, NULL);
8361
8362 new_body = gimple_omp_body (stmt);
8363 gimple_omp_set_body (stmt, NULL);
8364 tgsi = gsi_start (new_body);
8365 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
8366 {
8367 omp_context *sctx;
8368 gimple sec_start;
8369
8370 sec_start = gsi_stmt (tgsi);
8371 sctx = maybe_lookup_ctx (sec_start);
8372 gcc_assert (sctx);
8373
8374 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
8375 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
8376 GSI_CONTINUE_LINKING);
8377 gimple_omp_set_body (sec_start, NULL);
8378
8379 if (gsi_one_before_end_p (tgsi))
8380 {
8381 gimple_seq l = NULL;
8382 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
8383 &l, ctx);
8384 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
8385 gimple_omp_section_set_last (sec_start);
8386 }
8387
8388 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
8389 GSI_CONTINUE_LINKING);
8390 }
8391
8392 block = make_node (BLOCK);
8393 bind = gimple_build_bind (NULL, new_body, block);
8394
8395 olist = NULL;
8396 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
8397
8398 block = make_node (BLOCK);
8399 new_stmt = gimple_build_bind (NULL, NULL, block);
8400 gsi_replace (gsi_p, new_stmt, true);
8401
8402 pop_gimplify_context (new_stmt);
8403 gimple_bind_append_vars (new_stmt, ctx->block_vars);
8404 BLOCK_VARS (block) = gimple_bind_vars (bind);
8405 if (BLOCK_VARS (block))
8406 TREE_USED (block) = 1;
8407
8408 new_body = NULL;
8409 gimple_seq_add_seq (&new_body, ilist);
8410 gimple_seq_add_stmt (&new_body, stmt);
8411 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
8412 gimple_seq_add_stmt (&new_body, bind);
8413
8414 control = create_tmp_var (unsigned_type_node, ".section");
8415 t = gimple_build_omp_continue (control, control);
8416 gimple_omp_sections_set_control (stmt, control);
8417 gimple_seq_add_stmt (&new_body, t);
8418
8419 gimple_seq_add_seq (&new_body, olist);
8420 if (ctx->cancellable)
8421 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
8422 gimple_seq_add_seq (&new_body, dlist);
8423
8424 new_body = maybe_catch_exception (new_body);
8425
8426 t = gimple_build_omp_return
8427 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
8428 OMP_CLAUSE_NOWAIT));
8429 gimple_seq_add_stmt (&new_body, t);
8430 maybe_add_implicit_barrier_cancel (ctx, &new_body);
8431
8432 gimple_bind_set_body (new_stmt, new_body);
8433 }
8434
8435
8436 /* A subroutine of lower_omp_single. Expand the simple form of
8437 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
8438
8439 if (GOMP_single_start ())
8440 BODY;
8441 [ GOMP_barrier (); ] -> unless 'nowait' is present.
8442
8443 FIXME. It may be better to delay expanding the logic of this until
8444 pass_expand_omp. The expanded logic may make the job more difficult
8445 for a synchronization analysis pass. */
8446
8447 static void
8448 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
8449 {
8450 location_t loc = gimple_location (single_stmt);
8451 tree tlabel = create_artificial_label (loc);
8452 tree flabel = create_artificial_label (loc);
8453 gimple call, cond;
8454 tree lhs, decl;
8455
8456 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
8457 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
8458 call = gimple_build_call (decl, 0);
8459 gimple_call_set_lhs (call, lhs);
8460 gimple_seq_add_stmt (pre_p, call);
8461
8462 cond = gimple_build_cond (EQ_EXPR, lhs,
8463 fold_convert_loc (loc, TREE_TYPE (lhs),
8464 boolean_true_node),
8465 tlabel, flabel);
8466 gimple_seq_add_stmt (pre_p, cond);
8467 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
8468 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
8469 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
8470 }
8471
8472
8473 /* A subroutine of lower_omp_single. Expand the simple form of
8474 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
8475
8476 #pragma omp single copyprivate (a, b, c)
8477
8478 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
8479
8480 {
8481 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
8482 {
8483 BODY;
8484 copyout.a = a;
8485 copyout.b = b;
8486 copyout.c = c;
8487 GOMP_single_copy_end (&copyout);
8488 }
8489 else
8490 {
8491 a = copyout_p->a;
8492 b = copyout_p->b;
8493 c = copyout_p->c;
8494 }
8495 GOMP_barrier ();
8496 }
8497
8498 FIXME. It may be better to delay expanding the logic of this until
8499 pass_expand_omp. The expanded logic may make the job more difficult
8500 for a synchronization analysis pass. */
8501
8502 static void
8503 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
8504 {
8505 tree ptr_type, t, l0, l1, l2, bfn_decl;
8506 gimple_seq copyin_seq;
8507 location_t loc = gimple_location (single_stmt);
8508
8509 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
8510
8511 ptr_type = build_pointer_type (ctx->record_type);
8512 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
8513
8514 l0 = create_artificial_label (loc);
8515 l1 = create_artificial_label (loc);
8516 l2 = create_artificial_label (loc);
8517
8518 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
8519 t = build_call_expr_loc (loc, bfn_decl, 0);
8520 t = fold_convert_loc (loc, ptr_type, t);
8521 gimplify_assign (ctx->receiver_decl, t, pre_p);
8522
8523 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
8524 build_int_cst (ptr_type, 0));
8525 t = build3 (COND_EXPR, void_type_node, t,
8526 build_and_jump (&l0), build_and_jump (&l1));
8527 gimplify_and_add (t, pre_p);
8528
8529 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
8530
8531 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
8532
8533 copyin_seq = NULL;
8534 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
8535 &copyin_seq, ctx);
8536
8537 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
8538 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
8539 t = build_call_expr_loc (loc, bfn_decl, 1, t);
8540 gimplify_and_add (t, pre_p);
8541
8542 t = build_and_jump (&l2);
8543 gimplify_and_add (t, pre_p);
8544
8545 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
8546
8547 gimple_seq_add_seq (pre_p, copyin_seq);
8548
8549 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
8550 }
8551
8552
8553 /* Expand code for an OpenMP single directive. */
8554
8555 static void
8556 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8557 {
8558 tree block;
8559 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
8560 gimple_seq bind_body, bind_body_tail = NULL, dlist;
8561 struct gimplify_ctx gctx;
8562
8563 push_gimplify_context (&gctx);
8564
8565 block = make_node (BLOCK);
8566 bind = gimple_build_bind (NULL, NULL, block);
8567 gsi_replace (gsi_p, bind, true);
8568 bind_body = NULL;
8569 dlist = NULL;
8570 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
8571 &bind_body, &dlist, ctx, NULL);
8572 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
8573
8574 gimple_seq_add_stmt (&bind_body, single_stmt);
8575
8576 if (ctx->record_type)
8577 lower_omp_single_copy (single_stmt, &bind_body, ctx);
8578 else
8579 lower_omp_single_simple (single_stmt, &bind_body);
8580
8581 gimple_omp_set_body (single_stmt, NULL);
8582
8583 gimple_seq_add_seq (&bind_body, dlist);
8584
8585 bind_body = maybe_catch_exception (bind_body);
8586
8587 t = gimple_build_omp_return
8588 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
8589 OMP_CLAUSE_NOWAIT));
8590 gimple_seq_add_stmt (&bind_body_tail, t);
8591 maybe_add_implicit_barrier_cancel (ctx, &bind_body_tail);
8592 if (ctx->record_type)
8593 {
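/* Emit a clobber (a volatile empty CONSTRUCTOR) for the copy-out
   record once the single region is done, so later passes know its
   stack slot can be reused.  */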
8594 gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
8595 tree clobber = build_constructor (ctx->record_type, NULL);
8596 TREE_THIS_VOLATILE (clobber) = 1;
8597 gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
8598 clobber), GSI_SAME_STMT);
8599 }
8600 gimple_seq_add_seq (&bind_body, bind_body_tail);
8601 gimple_bind_set_body (bind, bind_body);
8602
8603 pop_gimplify_context (bind);
8604
8605 gimple_bind_append_vars (bind, ctx->block_vars);
8606 BLOCK_VARS (block) = ctx->block_vars;
8607 if (BLOCK_VARS (block))
8608 TREE_USED (block) = 1;
8609 }
8610
8611
8612 /* Expand code for an OpenMP master directive. */
8613
8614 static void
8615 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8616 {
8617 tree block, lab = NULL, x, bfn_decl;
8618 gimple stmt = gsi_stmt (*gsi_p), bind;
8619 location_t loc = gimple_location (stmt);
8620 gimple_seq tseq;
8621 struct gimplify_ctx gctx;
8622
8623 push_gimplify_context (&gctx);
8624
8625 block = make_node (BLOCK);
8626 bind = gimple_build_bind (NULL, NULL, block);
8627 gsi_replace (gsi_p, bind, true);
8628 gimple_bind_add_stmt (bind, stmt);
8629
8630 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
8631 x = build_call_expr_loc (loc, bfn_decl, 0);
8632 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
8633 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
8634 tseq = NULL;
8635 gimplify_and_add (x, &tseq);
8636 gimple_bind_add_seq (bind, tseq);
8637
8638 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8639 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
8640 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8641 gimple_omp_set_body (stmt, NULL);
8642
8643 gimple_bind_add_stmt (bind, gimple_build_label (lab));
8644
8645 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
8646
8647 pop_gimplify_context (bind);
8648
8649 gimple_bind_append_vars (bind, ctx->block_vars);
8650 BLOCK_VARS (block) = ctx->block_vars;
8651 }
8652
8653
8654 /* Expand code for an OpenMP taskgroup directive. */
8655
8656 static void
8657 lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8658 {
8659 gimple stmt = gsi_stmt (*gsi_p), bind, x;
8660 tree block = make_node (BLOCK);
8661
8662 bind = gimple_build_bind (NULL, NULL, block);
8663 gsi_replace (gsi_p, bind, true);
8664 gimple_bind_add_stmt (bind, stmt);
8665
8666 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
8667 0);
8668 gimple_bind_add_stmt (bind, x);
8669
8670 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8671 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8672 gimple_omp_set_body (stmt, NULL);
8673
8674 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
8675
8676 gimple_bind_append_vars (bind, ctx->block_vars);
8677 BLOCK_VARS (block) = ctx->block_vars;
8678 }
8679
8680
8681 /* Expand code for an OpenMP ordered directive. */
8682
8683 static void
8684 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8685 {
8686 tree block;
8687 gimple stmt = gsi_stmt (*gsi_p), bind, x;
8688 struct gimplify_ctx gctx;
8689
8690 push_gimplify_context (&gctx);
8691
8692 block = make_node (BLOCK);
8693 bind = gimple_build_bind (NULL, NULL, block);
8694 gsi_replace (gsi_p, bind, true);
8695 gimple_bind_add_stmt (bind, stmt);
8696
8697 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
8698 0);
8699 gimple_bind_add_stmt (bind, x);
8700
8701 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8702 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
8703 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8704 gimple_omp_set_body (stmt, NULL);
8705
8706 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
8707 gimple_bind_add_stmt (bind, x);
8708
8709 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
8710
8711 pop_gimplify_context (bind);
8712
8713 gimple_bind_append_vars (bind, ctx->block_vars);
8714 BLOCK_VARS (block) = gimple_bind_vars (bind);
8715 }
8716
8717
8718 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
8719 substitution of a couple of function calls. But in the NAMED case,
8720 it requires that languages coordinate a symbol name. It is therefore
8721 best put here in common code. */
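/* For example, "#pragma omp critical (foo)" is lowered below into calls
   to GOMP_critical_name_start/end on the address of a common symbol
   named .gomp_critical_user_foo, so every translation unit using that
   name shares one lock.  */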
8722
8723 static GTY((param1_is (tree), param2_is (tree)))
8724 splay_tree critical_name_mutexes;
8725
8726 static void
8727 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8728 {
8729 tree block;
8730 tree name, lock, unlock;
8731 gimple stmt = gsi_stmt (*gsi_p), bind;
8732 location_t loc = gimple_location (stmt);
8733 gimple_seq tbody;
8734 struct gimplify_ctx gctx;
8735
8736 name = gimple_omp_critical_name (stmt);
8737 if (name)
8738 {
8739 tree decl;
8740 splay_tree_node n;
8741
8742 if (!critical_name_mutexes)
8743 critical_name_mutexes
8744 = splay_tree_new_ggc (splay_tree_compare_pointers,
8745 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
8746 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
8747
8748 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
8749 if (n == NULL)
8750 {
8751 char *new_str;
8752
8753 decl = create_tmp_var_raw (ptr_type_node, NULL);
8754
8755 new_str = ACONCAT ((".gomp_critical_user_",
8756 IDENTIFIER_POINTER (name), NULL));
8757 DECL_NAME (decl) = get_identifier (new_str);
8758 TREE_PUBLIC (decl) = 1;
8759 TREE_STATIC (decl) = 1;
8760 DECL_COMMON (decl) = 1;
8761 DECL_ARTIFICIAL (decl) = 1;
8762 DECL_IGNORED_P (decl) = 1;
8763 varpool_finalize_decl (decl);
8764
8765 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
8766 (splay_tree_value) decl);
8767 }
8768 else
8769 decl = (tree) n->value;
8770
8771 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
8772 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
8773
8774 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
8775 unlock = build_call_expr_loc (loc, unlock, 1,
8776 build_fold_addr_expr_loc (loc, decl));
8777 }
8778 else
8779 {
8780 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
8781 lock = build_call_expr_loc (loc, lock, 0);
8782
8783 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
8784 unlock = build_call_expr_loc (loc, unlock, 0);
8785 }
8786
8787 push_gimplify_context (&gctx);
8788
8789 block = make_node (BLOCK);
8790 bind = gimple_build_bind (NULL, NULL, block);
8791 gsi_replace (gsi_p, bind, true);
8792 gimple_bind_add_stmt (bind, stmt);
8793
8794 tbody = gimple_bind_body (bind);
8795 gimplify_and_add (lock, &tbody);
8796 gimple_bind_set_body (bind, tbody);
8797
8798 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8799 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
8800 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
8801 gimple_omp_set_body (stmt, NULL);
8802
8803 tbody = gimple_bind_body (bind);
8804 gimplify_and_add (unlock, &tbody);
8805 gimple_bind_set_body (bind, tbody);
8806
8807 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
8808
8809 pop_gimplify_context (bind);
8810 gimple_bind_append_vars (bind, ctx->block_vars);
8811 BLOCK_VARS (block) = gimple_bind_vars (bind);
8812 }
8813
8814
8815 /* A subroutine of lower_omp_for. Generate code to emit the predicate
8816 for a lastprivate clause. Given a loop control predicate of (V
8817 cond N2), we gate the clause on (!(V cond N2)). The lowered form
8818 is appended to *DLIST, iterator initialization is appended to
8819 *BODY_P. */
8820
8821 static void
8822 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
8823 gimple_seq *dlist, struct omp_context *ctx)
8824 {
8825 tree clauses, cond, vinit;
8826 enum tree_code cond_code;
8827 gimple_seq stmts;
8828
8829 cond_code = fd->loop.cond_code;
8830 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
8831
8832 /* When possible, use a strict equality expression. This can let VRP
8833 type optimizations deduce the value and remove a copy. */
8834 if (host_integerp (fd->loop.step, 0))
8835 {
8836 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
8837 if (step == 1 || step == -1)
8838 cond_code = EQ_EXPR;
8839 }
8840
8841 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
8842
8843 clauses = gimple_omp_for_clauses (fd->for_stmt);
8844 stmts = NULL;
8845 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
8846 if (!gimple_seq_empty_p (stmts))
8847 {
8848 gimple_seq_add_seq (&stmts, *dlist);
8849 *dlist = stmts;
8850
8851 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
8852 vinit = fd->loop.n1;
8853 if (cond_code == EQ_EXPR
8854 && host_integerp (fd->loop.n2, 0)
8855 && ! integer_zerop (fd->loop.n2))
8856 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
8857 else
8858 vinit = unshare_expr (vinit);
8859
8860 /* Initialize the iterator variable, so that threads that don't execute
8861 any iterations don't execute the lastprivate clauses by accident. */
8862 gimplify_assign (fd->loop.v, vinit, body_p);
8863 }
8864 }
8865
8866
8867 /* Lower code for an OpenMP loop directive. */
8868
8869 static void
8870 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
8871 {
8872 tree *rhs_p, block;
8873 struct omp_for_data fd, *fdp = NULL;
8874 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
8875 gimple_seq omp_for_body, body, dlist;
8876 size_t i;
8877 struct gimplify_ctx gctx;
8878
8879 push_gimplify_context (&gctx);
8880
8881 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
8882
8883 block = make_node (BLOCK);
8884 new_stmt = gimple_build_bind (NULL, NULL, block);
8885 /* Replace at gsi right away, so that 'stmt' is no member
8886 of a sequence anymore, as we're going to add it to a different
8887 one below. */
8888 gsi_replace (gsi_p, new_stmt, true);
8889
8890 /* Move declarations of temporaries in the loop body before we make
8891 it go away. */
8892 omp_for_body = gimple_omp_body (stmt);
8893 if (!gimple_seq_empty_p (omp_for_body)
8894 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
8895 {
8896 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
8897 gimple_bind_append_vars (new_stmt, vars);
8898 }
8899
8900 if (gimple_omp_for_combined_into_p (stmt))
8901 {
8902 extract_omp_for_data (stmt, &fd, NULL);
8903 fdp = &fd;
8904
8905 /* We need two temporaries with fd.loop.v type (istart/iend)
8906 and then (fd.collapse - 1) temporaries with the same
8907 type for count2 ... countN-1 vars if not constant. */
8908 size_t count = 2;
8909 tree type = fd.iter_type;
8910 if (fd.collapse > 1
8911 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
8912 count += fd.collapse - 1;
8913 bool parallel_for = gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR;
8914 tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
8915 tree clauses = *pc;
8916 if (parallel_for)
8917 outerc
8918 = find_omp_clause (gimple_omp_parallel_clauses (ctx->outer->stmt),
8919 OMP_CLAUSE__LOOPTEMP_);
8920 for (i = 0; i < count; i++)
8921 {
8922 tree temp;
8923 if (parallel_for)
8924 {
8925 gcc_assert (outerc);
8926 temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
8927 outerc = find_omp_clause (OMP_CLAUSE_CHAIN (outerc),
8928 OMP_CLAUSE__LOOPTEMP_);
8929 }
8930 else
8931 temp = create_tmp_var (type, NULL);
8932 *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
8933 OMP_CLAUSE_DECL (*pc) = temp;
8934 pc = &OMP_CLAUSE_CHAIN (*pc);
8935 }
8936 *pc = clauses;
8937 }
8938
8939 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
8940 dlist = NULL;
8941 body = NULL;
8942 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
8943 fdp);
8944 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
8945
8946 lower_omp (gimple_omp_body_ptr (stmt), ctx);
8947
8948 /* Lower the header expressions. At this point, we can assume that
8949 the header is of the form:
8950
8951 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
8952
8953 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
8954 using the .omp_data_s mapping, if needed. */
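/* Source-level illustration only: in

	#pragma omp for
	for (i = p->lo; i < p->hi; i += step)

   the expressions p->lo, p->hi and step are not gimple invariants, so
   each is replaced below by a formal temporary computed in BODY ahead
   of the GIMPLE_OMP_FOR. */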
8955 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
8956 {
8957 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
8958 if (!is_gimple_min_invariant (*rhs_p))
8959 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
8960
8961 rhs_p = gimple_omp_for_final_ptr (stmt, i);
8962 if (!is_gimple_min_invariant (*rhs_p))
8963 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
8964
8965 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
8966 if (!is_gimple_min_invariant (*rhs_p))
8967 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
8968 }
8969
8970 /* Once lowered, extract the bounds and clauses. */
8971 extract_omp_for_data (stmt, &fd, NULL);
8972
8973 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
8974
8975 gimple_seq_add_stmt (&body, stmt);
8976 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
8977
8978 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
8979 fd.loop.v));
8980
8981 /* After the loop, add exit clauses. */
8982 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
8983
8984 if (ctx->cancellable)
8985 gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
8986
8987 gimple_seq_add_seq (&body, dlist);
8988
8989 body = maybe_catch_exception (body);
8990
8991 /* Region exit marker goes at the end of the loop body. */
8992 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
8993 maybe_add_implicit_barrier_cancel (ctx, &body);
8994 pop_gimplify_context (new_stmt);
8995
8996 gimple_bind_append_vars (new_stmt, ctx->block_vars);
8997 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
8998 if (BLOCK_VARS (block))
8999 TREE_USED (block) = 1;
9000
9001 gimple_bind_set_body (new_stmt, body);
9002 gimple_omp_set_body (stmt, NULL);
9003 gimple_omp_for_set_pre_body (stmt, NULL);
9004 }
9005
9006 /* Callback for walk_stmts. Check if the current statement only contains
9007 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
9008
9009 static tree
9010 check_combined_parallel (gimple_stmt_iterator *gsi_p,
9011 bool *handled_ops_p,
9012 struct walk_stmt_info *wi)
9013 {
9014 int *info = (int *) wi->info;
9015 gimple stmt = gsi_stmt (*gsi_p);
9016
9017 *handled_ops_p = true;
9018 switch (gimple_code (stmt))
9019 {
9020 WALK_SUBSTMTS;
9021
9022 case GIMPLE_OMP_FOR:
9023 case GIMPLE_OMP_SECTIONS:
9024 *info = *info == 0 ? 1 : -1;
9025 break;
9026 default:
9027 *info = -1;
9028 break;
9029 }
9030 return NULL;
9031 }
9032
9033 struct omp_taskcopy_context
9034 {
9035 /* This field must be at the beginning, as we do "inheritance": Some
9036 callback functions for tree-inline.c (e.g., task_copyfn_copy_decl)
9037 receive a copy_body_data pointer that is up-cast to an
9038 omp_taskcopy_context pointer. */
9039 copy_body_data cb;
9040 omp_context *ctx;
9041 };
9042
9043 static tree
9044 task_copyfn_copy_decl (tree var, copy_body_data *cb)
9045 {
9046 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
9047
9048 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
9049 return create_tmp_var (TREE_TYPE (var), NULL);
9050
9051 return var;
9052 }
9053
9054 static tree
9055 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
9056 {
9057 tree name, new_fields = NULL, type, f;
9058
9059 type = lang_hooks.types.make_type (RECORD_TYPE);
9060 name = DECL_NAME (TYPE_NAME (orig_type));
9061 name = build_decl (gimple_location (tcctx->ctx->stmt),
9062 TYPE_DECL, name, type);
9063 TYPE_NAME (type) = name;
9064
9065 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
9066 {
9067 tree new_f = copy_node (f);
9068 DECL_CONTEXT (new_f) = type;
9069 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
9070 TREE_CHAIN (new_f) = new_fields;
9071 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9072 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
9073 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
9074 &tcctx->cb, NULL);
9075 new_fields = new_f;
9076 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
9077 }
9078 TYPE_FIELDS (type) = nreverse (new_fields);
9079 layout_type (type);
9080 return type;
9081 }
9082
9083 /* Create task copyfn. */
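/* The copy function takes a pointer to the task's own data block
   (RECORD_TYPE) as its first argument and a pointer to the sender
   block captured at task creation (SRECORD_TYPE) as its second.  As an
   illustrative sketch only (the struct and field names are made up):

	void copyfn (struct .omp_data_t *dst, struct .omp_data_s *src)
	{
	  dst->shared_ptr = src->shared_ptr;
	  dst->firstpriv = <copy-construct> (src->firstpriv);
	  <VLA firstprivate copies and pointer fixups>
	}
  */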
9084
9085 static void
9086 create_task_copyfn (gimple task_stmt, omp_context *ctx)
9087 {
9088 struct function *child_cfun;
9089 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
9090 tree record_type, srecord_type, bind, list;
9091 bool record_needs_remap = false, srecord_needs_remap = false;
9092 splay_tree_node n;
9093 struct omp_taskcopy_context tcctx;
9094 struct gimplify_ctx gctx;
9095 location_t loc = gimple_location (task_stmt);
9096
9097 child_fn = gimple_omp_task_copy_fn (task_stmt);
9098 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
9099 gcc_assert (child_cfun->cfg == NULL);
9100 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
9101
9102 /* Reset DECL_CONTEXT on function arguments. */
9103 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
9104 DECL_CONTEXT (t) = child_fn;
9105
9106 /* Populate the function. */
9107 push_gimplify_context (&gctx);
9108 push_cfun (child_cfun);
9109
9110 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
9111 TREE_SIDE_EFFECTS (bind) = 1;
9112 list = NULL;
9113 DECL_SAVED_TREE (child_fn) = bind;
9114 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
9115
9116 /* Remap src and dst argument types if needed. */
9117 record_type = ctx->record_type;
9118 srecord_type = ctx->srecord_type;
9119 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
9120 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9121 {
9122 record_needs_remap = true;
9123 break;
9124 }
9125 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
9126 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
9127 {
9128 srecord_needs_remap = true;
9129 break;
9130 }
9131
9132 if (record_needs_remap || srecord_needs_remap)
9133 {
9134 memset (&tcctx, '\0', sizeof (tcctx));
9135 tcctx.cb.src_fn = ctx->cb.src_fn;
9136 tcctx.cb.dst_fn = child_fn;
9137 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
9138 gcc_checking_assert (tcctx.cb.src_node);
9139 tcctx.cb.dst_node = tcctx.cb.src_node;
9140 tcctx.cb.src_cfun = ctx->cb.src_cfun;
9141 tcctx.cb.copy_decl = task_copyfn_copy_decl;
9142 tcctx.cb.eh_lp_nr = 0;
9143 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
9144 tcctx.cb.decl_map = pointer_map_create ();
9145 tcctx.ctx = ctx;
9146
9147 if (record_needs_remap)
9148 record_type = task_copyfn_remap_type (&tcctx, record_type);
9149 if (srecord_needs_remap)
9150 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
9151 }
9152 else
9153 tcctx.cb.decl_map = NULL;
9154
9155 arg = DECL_ARGUMENTS (child_fn);
9156 TREE_TYPE (arg) = build_pointer_type (record_type);
9157 sarg = DECL_CHAIN (arg);
9158 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
9159
9160 /* First pass: initialize temporaries used in record_type and srecord_type
9161 sizes and field offsets. */
9162 if (tcctx.cb.decl_map)
9163 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9164 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9165 {
9166 tree *p;
9167
9168 decl = OMP_CLAUSE_DECL (c);
9169 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
9170 if (p == NULL)
9171 continue;
9172 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9173 sf = (tree) n->value;
9174 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
9175 src = build_simple_mem_ref_loc (loc, sarg);
9176 src = omp_build_component_ref (src, sf);
9177 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
9178 append_to_statement_list (t, &list);
9179 }
9180
9181 /* Second pass: copy shared var pointers and copy construct non-VLA
9182 firstprivate vars. */
9183 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9184 switch (OMP_CLAUSE_CODE (c))
9185 {
9186 case OMP_CLAUSE_SHARED:
9187 decl = OMP_CLAUSE_DECL (c);
9188 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9189 if (n == NULL)
9190 break;
9191 f = (tree) n->value;
9192 if (tcctx.cb.decl_map)
9193 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
9194 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9195 sf = (tree) n->value;
9196 if (tcctx.cb.decl_map)
9197 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
9198 src = build_simple_mem_ref_loc (loc, sarg);
9199 src = omp_build_component_ref (src, sf);
9200 dst = build_simple_mem_ref_loc (loc, arg);
9201 dst = omp_build_component_ref (dst, f);
9202 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9203 append_to_statement_list (t, &list);
9204 break;
9205 case OMP_CLAUSE_FIRSTPRIVATE:
9206 decl = OMP_CLAUSE_DECL (c);
9207 if (is_variable_sized (decl))
9208 break;
9209 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9210 if (n == NULL)
9211 break;
9212 f = (tree) n->value;
9213 if (tcctx.cb.decl_map)
9214 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
9215 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9216 if (n != NULL)
9217 {
9218 sf = (tree) n->value;
9219 if (tcctx.cb.decl_map)
9220 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
9221 src = build_simple_mem_ref_loc (loc, sarg);
9222 src = omp_build_component_ref (src, sf);
9223 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
9224 src = build_simple_mem_ref_loc (loc, src);
9225 }
9226 else
9227 src = decl;
9228 dst = build_simple_mem_ref_loc (loc, arg);
9229 dst = omp_build_component_ref (dst, f);
9230 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9231 append_to_statement_list (t, &list);
9232 break;
9233 case OMP_CLAUSE_PRIVATE:
9234 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
9235 break;
9236 decl = OMP_CLAUSE_DECL (c);
9237 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9238 f = (tree) n->value;
9239 if (tcctx.cb.decl_map)
9240 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
9241 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
9242 if (n != NULL)
9243 {
9244 sf = (tree) n->value;
9245 if (tcctx.cb.decl_map)
9246 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
9247 src = build_simple_mem_ref_loc (loc, sarg);
9248 src = omp_build_component_ref (src, sf);
9249 if (use_pointer_for_field (decl, NULL))
9250 src = build_simple_mem_ref_loc (loc, src);
9251 }
9252 else
9253 src = decl;
9254 dst = build_simple_mem_ref_loc (loc, arg);
9255 dst = omp_build_component_ref (dst, f);
9256 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
9257 append_to_statement_list (t, &list);
9258 break;
9259 default:
9260 break;
9261 }
9262
9263 /* Last pass: handle VLA firstprivates. */
9264 if (tcctx.cb.decl_map)
9265 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
9266 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
9267 {
9268 tree ind, ptr, df;
9269
9270 decl = OMP_CLAUSE_DECL (c);
9271 if (!is_variable_sized (decl))
9272 continue;
9273 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
9274 if (n == NULL)
9275 continue;
9276 f = (tree) n->value;
9277 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
9278 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
9279 ind = DECL_VALUE_EXPR (decl);
9280 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
9281 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
9282 n = splay_tree_lookup (ctx->sfield_map,
9283 (splay_tree_key) TREE_OPERAND (ind, 0));
9284 sf = (tree) n->value;
9285 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
9286 src = build_simple_mem_ref_loc (loc, sarg);
9287 src = omp_build_component_ref (src, sf);
9288 src = build_simple_mem_ref_loc (loc, src);
9289 dst = build_simple_mem_ref_loc (loc, arg);
9290 dst = omp_build_component_ref (dst, f);
9291 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
9292 append_to_statement_list (t, &list);
9293 n = splay_tree_lookup (ctx->field_map,
9294 (splay_tree_key) TREE_OPERAND (ind, 0));
9295 df = (tree) n->value;
9296 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
9297 ptr = build_simple_mem_ref_loc (loc, arg);
9298 ptr = omp_build_component_ref (ptr, df);
9299 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
9300 build_fold_addr_expr_loc (loc, dst));
9301 append_to_statement_list (t, &list);
9302 }
9303
9304 t = build1 (RETURN_EXPR, void_type_node, NULL);
9305 append_to_statement_list (t, &list);
9306
9307 if (tcctx.cb.decl_map)
9308 pointer_map_destroy (tcctx.cb.decl_map);
9309 pop_gimplify_context (NULL);
9310 BIND_EXPR_BODY (bind) = list;
9311 pop_cfun ();
9312 }
9313
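/* Lower the OMP_CLAUSE_DEPEND clauses of task STMT into the address
   array expected by the runtime: element 0 holds the total number of
   dependences, element 1 the number of out/inout dependences, followed
   by the out/inout addresses and then the in addresses.  A new
   OMP_CLAUSE_DEPEND carrying the array's address is prepended to the
   task's clauses, and the array is clobbered after the task via OSEQ.
   Illustrative layout for "depend(out: a) depend(in: b, c)":

	{ 3, 1, &a, &b, &c }  */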
9314 static void
9315 lower_depend_clauses (gimple stmt, gimple_seq *iseq, gimple_seq *oseq)
9316 {
9317 tree c, clauses;
9318 gimple g;
9319 size_t n_in = 0, n_out = 0, idx = 2, i;
9320
9321 clauses = find_omp_clause (gimple_omp_task_clauses (stmt),
9322 OMP_CLAUSE_DEPEND);
9323 gcc_assert (clauses);
9324 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9325 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
9326 switch (OMP_CLAUSE_DEPEND_KIND (c))
9327 {
9328 case OMP_CLAUSE_DEPEND_IN:
9329 n_in++;
9330 break;
9331 case OMP_CLAUSE_DEPEND_OUT:
9332 case OMP_CLAUSE_DEPEND_INOUT:
9333 n_out++;
9334 break;
9335 default:
9336 gcc_unreachable ();
9337 }
9338 tree type = build_array_type_nelts (ptr_type_node, n_in + n_out + 2);
9339 tree array = create_tmp_var (type, NULL);
9340 tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
9341 NULL_TREE);
9342 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_in + n_out));
9343 gimple_seq_add_stmt (iseq, g);
9344 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
9345 NULL_TREE);
9346 g = gimple_build_assign (r, build_int_cst (ptr_type_node, n_out));
9347 gimple_seq_add_stmt (iseq, g);
9348 for (i = 0; i < 2; i++)
9349 {
9350 if ((i ? n_in : n_out) == 0)
9351 continue;
9352 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
9353 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
9354 && ((OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_IN) ^ i))
9355 {
9356 tree t = OMP_CLAUSE_DECL (c);
9357 t = fold_convert (ptr_type_node, t);
9358 gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
9359 r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
9360 NULL_TREE, NULL_TREE);
9361 g = gimple_build_assign (r, t);
9362 gimple_seq_add_stmt (iseq, g);
9363 }
9364 }
9365 tree *p = gimple_omp_task_clauses_ptr (stmt);
9366 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
9367 OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
9368 OMP_CLAUSE_CHAIN (c) = *p;
9369 *p = c;
9370 tree clobber = build_constructor (type, NULL);
9371 TREE_THIS_VOLATILE (clobber) = 1;
9372 g = gimple_build_assign (array, clobber);
9373 gimple_seq_add_stmt (oseq, g);
9374 }
9375
9376 /* Lower the OpenMP parallel or task directive in the current statement
9377 in GSI_P. CTX holds context information for the directive. */
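/* Rough shape of the result (illustrative only):

	GIMPLE_BIND
	  <send clauses and shared-var sends (ILIST)>
	  GIMPLE_OMP_PARALLEL / GIMPLE_OMP_TASK   <- body rebuilt below
	  <receive side (OLIST), sender-decl clobber>

   For a task with depend() clauses this bind is itself wrapped in an
   outer bind holding the dependence-array setup and clobber. */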
9378
9379 static void
9380 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9381 {
9382 tree clauses;
9383 tree child_fn, t;
9384 gimple stmt = gsi_stmt (*gsi_p);
9385 gimple par_bind, bind, dep_bind = NULL;
9386 gimple_seq par_body, olist, ilist, par_olist, par_rlist, par_ilist, new_body;
9387 struct gimplify_ctx gctx, dep_gctx;
9388 location_t loc = gimple_location (stmt);
9389
9390 clauses = gimple_omp_taskreg_clauses (stmt);
9391 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
9392 par_body = gimple_bind_body (par_bind);
9393 child_fn = ctx->cb.dst_fn;
9394 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
9395 && !gimple_omp_parallel_combined_p (stmt))
9396 {
9397 struct walk_stmt_info wi;
9398 int ws_num = 0;
9399
9400 memset (&wi, 0, sizeof (wi));
9401 wi.info = &ws_num;
9402 wi.val_only = true;
9403 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
9404 if (ws_num == 1)
9405 gimple_omp_parallel_set_combined_p (stmt, true);
9406 }
9407 gimple_seq dep_ilist = NULL;
9408 gimple_seq dep_olist = NULL;
9409 if (gimple_code (stmt) == GIMPLE_OMP_TASK
9410 && find_omp_clause (clauses, OMP_CLAUSE_DEPEND))
9411 {
9412 push_gimplify_context (&dep_gctx);
9413 dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
9414 lower_depend_clauses (stmt, &dep_ilist, &dep_olist);
9415 }
9416
9417 if (ctx->srecord_type)
9418 create_task_copyfn (stmt, ctx);
9419
9420 push_gimplify_context (&gctx);
9421
9422 par_olist = NULL;
9423 par_ilist = NULL;
9424 par_rlist = NULL;
9425 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
9426 lower_omp (&par_body, ctx);
9427 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
9428 lower_reduction_clauses (clauses, &par_rlist, ctx);
9429
9430 /* Declare all the variables created by mapping and the variables
9431 declared in the scope of the parallel body. */
9432 record_vars_into (ctx->block_vars, child_fn);
9433 record_vars_into (gimple_bind_vars (par_bind), child_fn);
9434
9435 if (ctx->record_type)
9436 {
9437 ctx->sender_decl
9438 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
9439 : ctx->record_type, ".omp_data_o");
9440 DECL_NAMELESS (ctx->sender_decl) = 1;
9441 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
9442 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
9443 }
9444
9445 olist = NULL;
9446 ilist = NULL;
9447 lower_send_clauses (clauses, &ilist, &olist, ctx);
9448 lower_send_shared_vars (&ilist, &olist, ctx);
9449
9450 if (ctx->record_type)
9451 {
9452 tree clobber = build_constructor (TREE_TYPE (ctx->sender_decl), NULL);
9453 TREE_THIS_VOLATILE (clobber) = 1;
9454 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
9455 clobber));
9456 }
9457
9458 /* Once all the expansions are done, sequence all the different
9459 fragments inside gimple_omp_body. */
9460
9461 new_body = NULL;
9462
9463 if (ctx->record_type)
9464 {
9465 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
9466 /* fixup_child_record_type might have changed receiver_decl's type. */
9467 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
9468 gimple_seq_add_stmt (&new_body,
9469 gimple_build_assign (ctx->receiver_decl, t));
9470 }
9471
9472 gimple_seq_add_seq (&new_body, par_ilist);
9473 gimple_seq_add_seq (&new_body, par_body);
9474 gimple_seq_add_seq (&new_body, par_rlist);
9475 if (ctx->cancellable)
9476 gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
9477 gimple_seq_add_seq (&new_body, par_olist);
9478 new_body = maybe_catch_exception (new_body);
9479 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
9480 gimple_omp_set_body (stmt, new_body);
9481
9482 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
9483 gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
9484 gimple_bind_add_seq (bind, ilist);
9485 gimple_bind_add_stmt (bind, stmt);
9486 gimple_bind_add_seq (bind, olist);
9487
9488 pop_gimplify_context (NULL);
9489
9490 if (dep_bind)
9491 {
9492 gimple_bind_add_seq (dep_bind, dep_ilist);
9493 gimple_bind_add_stmt (dep_bind, bind);
9494 gimple_bind_add_seq (dep_bind, dep_olist);
9495 pop_gimplify_context (dep_bind);
9496 }
9497 }
9498
9499 /* Lower the OpenMP target directive in the current statement
9500 in GSI_P. CTX holds context information for the directive. */
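/* For a target region the mapped data is described to the runtime by a
   three-element TREE_VEC built below: .omp_data_arr (the addresses),
   .omp_data_sizes (byte sizes) and .omp_data_kinds (the map kind in the
   low bits plus the ceil-log2 alignment shifted left by 3), filled in
   by the constructor-building loop that follows. */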
9501
9502 static void
9503 lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9504 {
9505 tree clauses;
9506 tree child_fn, t, c;
9507 gimple stmt = gsi_stmt (*gsi_p);
9508 gimple tgt_bind = NULL, bind;
9509 gimple_seq tgt_body = NULL, olist, ilist, new_body;
9510 struct gimplify_ctx gctx;
9511 location_t loc = gimple_location (stmt);
9512 int kind = gimple_omp_target_kind (stmt);
9513 unsigned int map_cnt = 0;
9514
9515 clauses = gimple_omp_target_clauses (stmt);
9516 if (kind == GF_OMP_TARGET_KIND_REGION)
9517 {
9518 tgt_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
9519 tgt_body = gimple_bind_body (tgt_bind);
9520 }
9521 else if (kind == GF_OMP_TARGET_KIND_DATA)
9522 tgt_body = gimple_omp_body (stmt);
9523 child_fn = ctx->cb.dst_fn;
9524
9525 push_gimplify_context (&gctx);
9526
9527 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
9528 switch (OMP_CLAUSE_CODE (c))
9529 {
9530 tree var, x;
9531
9532 default:
9533 break;
9534 case OMP_CLAUSE_MAP:
9535 case OMP_CLAUSE_TO:
9536 case OMP_CLAUSE_FROM:
9537 var = OMP_CLAUSE_DECL (c);
9538 if (!DECL_P (var))
9539 {
9540 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
9541 || !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
9542 map_cnt++;
9543 continue;
9544 }
9545
9546 if (DECL_SIZE (var)
9547 && TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
9548 {
9549 tree var2 = DECL_VALUE_EXPR (var);
9550 gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
9551 var2 = TREE_OPERAND (var2, 0);
9552 gcc_assert (DECL_P (var2));
9553 var = var2;
9554 }
9555
9556 if (!maybe_lookup_field (var, ctx))
9557 continue;
9558
9559 if (kind == GF_OMP_TARGET_KIND_REGION)
9560 {
9561 x = build_receiver_ref (var, true, ctx);
9562 tree new_var = lookup_decl (var, ctx);
9563 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
9564 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
9565 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
9566 && TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
9567 x = build_simple_mem_ref (x);
9568 SET_DECL_VALUE_EXPR (new_var, x);
9569 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
9570 }
9571 map_cnt++;
9572 }
9573
9574 if (kind == GF_OMP_TARGET_KIND_REGION)
9575 {
9576 target_nesting_level++;
9577 lower_omp (&tgt_body, ctx);
9578 target_nesting_level--;
9579 }
9580 else if (kind == GF_OMP_TARGET_KIND_DATA)
9581 lower_omp (&tgt_body, ctx);
9582
9583 if (kind == GF_OMP_TARGET_KIND_REGION)
9584 {
9585 /* Declare all the variables created by mapping and the variables
9586 declared in the scope of the target body. */
9587 record_vars_into (ctx->block_vars, child_fn);
9588 record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
9589 }
9590
9591 olist = NULL;
9592 ilist = NULL;
9593 if (ctx->record_type)
9594 {
9595 ctx->sender_decl
9596 = create_tmp_var (ctx->record_type, ".omp_data_arr");
9597 DECL_NAMELESS (ctx->sender_decl) = 1;
9598 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
9599 t = make_tree_vec (3);
9600 TREE_VEC_ELT (t, 0) = ctx->sender_decl;
9601 TREE_VEC_ELT (t, 1)
9602 = create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
9603 ".omp_data_sizes");
9604 DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
9605 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
9606 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
9607 TREE_VEC_ELT (t, 2)
9608 = create_tmp_var (build_array_type_nelts (unsigned_char_type_node,
9609 map_cnt),
9610 ".omp_data_kinds");
9611 DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
9612 TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
9613 TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
9614 gimple_omp_target_set_data_arg (stmt, t);
9615
9616 vec<constructor_elt, va_gc> *vsize;
9617 vec<constructor_elt, va_gc> *vkind;
9618 vec_alloc (vsize, map_cnt);
9619 vec_alloc (vkind, map_cnt);
9620 unsigned int map_idx = 0;
9621
9622 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
9623 switch (OMP_CLAUSE_CODE (c))
9624 {
9625 tree ovar, nc;
9626
9627 default:
9628 break;
9629 case OMP_CLAUSE_MAP:
9630 case OMP_CLAUSE_TO:
9631 case OMP_CLAUSE_FROM:
9632 nc = c;
9633 ovar = OMP_CLAUSE_DECL (c);
9634 if (!DECL_P (ovar))
9635 {
9636 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
9637 && OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
9638 {
9639 gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
9640 == get_base_address (ovar));
9641 nc = OMP_CLAUSE_CHAIN (c);
9642 ovar = OMP_CLAUSE_DECL (nc);
9643 }
9644 else
9645 {
9646 tree x = build_sender_ref (ovar, ctx);
9647 tree v
9648 = build_fold_addr_expr_with_type (ovar, ptr_type_node);
9649 gimplify_assign (x, v, &ilist);
9650 nc = NULL_TREE;
9651 }
9652 }
9653 else
9654 {
9655 if (DECL_SIZE (ovar)
9656 && TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
9657 {
9658 tree ovar2 = DECL_VALUE_EXPR (ovar);
9659 gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
9660 ovar2 = TREE_OPERAND (ovar2, 0);
9661 gcc_assert (DECL_P (ovar2));
9662 ovar = ovar2;
9663 }
9664 if (!maybe_lookup_field (ovar, ctx))
9665 continue;
9666 }
9667
9668 if (nc)
9669 {
9670 tree var = lookup_decl_in_outer_ctx (ovar, ctx);
9671 tree x = build_sender_ref (ovar, ctx);
9672 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
9673 && OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_POINTER
9674 && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
9675 && TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
9676 {
9677 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
9678 tree avar
9679 = create_tmp_var (TREE_TYPE (TREE_TYPE (x)), NULL);
9680 mark_addressable (avar);
9681 gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
9682 avar = build_fold_addr_expr (avar);
9683 gimplify_assign (x, avar, &ilist);
9684 }
9685 else if (is_gimple_reg (var))
9686 {
9687 gcc_assert (kind == GF_OMP_TARGET_KIND_REGION);
9688 tree avar = create_tmp_var (TREE_TYPE (var), NULL);
9689 mark_addressable (avar);
9690 if (OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_ALLOC
9691 && OMP_CLAUSE_MAP_KIND (c) != OMP_CLAUSE_MAP_FROM)
9692 gimplify_assign (avar, var, &ilist);
9693 avar = build_fold_addr_expr (avar);
9694 gimplify_assign (x, avar, &ilist);
9695 if ((OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_FROM
9696 || OMP_CLAUSE_MAP_KIND (c) == OMP_CLAUSE_MAP_TOFROM)
9697 && !TYPE_READONLY (TREE_TYPE (var)))
9698 {
9699 x = build_sender_ref (ovar, ctx);
9700 x = build_simple_mem_ref (x);
9701 gimplify_assign (var, x, &olist);
9702 }
9703 }
9704 else
9705 {
9706 var = build_fold_addr_expr (var);
9707 gimplify_assign (x, var, &ilist);
9708 }
9709 }
9710 tree s = OMP_CLAUSE_SIZE (c);
9711 if (s == NULL_TREE)
9712 s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
9713 s = fold_convert (size_type_node, s);
9714 tree purpose = size_int (map_idx++);
9715 CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
9716 if (TREE_CODE (s) != INTEGER_CST)
9717 TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
9718
9719 unsigned char tkind = 0;
9720 switch (OMP_CLAUSE_CODE (c))
9721 {
9722 case OMP_CLAUSE_MAP:
9723 tkind = OMP_CLAUSE_MAP_KIND (c);
9724 break;
9725 case OMP_CLAUSE_TO:
9726 tkind = OMP_CLAUSE_MAP_TO;
9727 break;
9728 case OMP_CLAUSE_FROM:
9729 tkind = OMP_CLAUSE_MAP_FROM;
9730 break;
9731 default:
9732 gcc_unreachable ();
9733 }
9734 unsigned int talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
9735 if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
9736 talign = DECL_ALIGN_UNIT (ovar);
9737 talign = ceil_log2 (talign);
9738 tkind |= talign << 3;
9739 CONSTRUCTOR_APPEND_ELT (vkind, purpose,
9740 build_int_cst (unsigned_char_type_node,
9741 tkind));
9742 if (nc && nc != c)
9743 c = nc;
9744 }
9745
9746 gcc_assert (map_idx == map_cnt);
9747
9748 DECL_INITIAL (TREE_VEC_ELT (t, 1))
9749 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
9750 DECL_INITIAL (TREE_VEC_ELT (t, 2))
9751 = build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
9752 if (!TREE_STATIC (TREE_VEC_ELT (t, 1)))
9753 {
9754 gimple_seq initlist = NULL;
9755 force_gimple_operand (build1 (DECL_EXPR, void_type_node,
9756 TREE_VEC_ELT (t, 1)),
9757 &initlist, true, NULL_TREE);
9758 gimple_seq_add_seq (&ilist, initlist);
9759 }
9760
9761 tree clobber = build_constructor (ctx->record_type, NULL);
9762 TREE_THIS_VOLATILE (clobber) = 1;
9763 gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
9764 clobber));
9765 }
9766
9767 /* Once all the expansions are done, sequence all the different
9768 fragments inside gimple_omp_body. */
9769
9770 new_body = NULL;
9771
9772 if (ctx->record_type && kind == GF_OMP_TARGET_KIND_REGION)
9773 {
9774 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
9775 /* fixup_child_record_type might have changed receiver_decl's type. */
9776 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
9777 gimple_seq_add_stmt (&new_body,
9778 gimple_build_assign (ctx->receiver_decl, t));
9779 }
9780
9781 if (kind == GF_OMP_TARGET_KIND_REGION)
9782 {
9783 gimple_seq_add_seq (&new_body, tgt_body);
9784 new_body = maybe_catch_exception (new_body);
9785 }
9786 else if (kind == GF_OMP_TARGET_KIND_DATA)
9787 new_body = tgt_body;
9788 if (kind != GF_OMP_TARGET_KIND_UPDATE)
9789 {
9790 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
9791 gimple_omp_set_body (stmt, new_body);
9792 }
9793
9794 bind = gimple_build_bind (NULL, NULL,
9795 tgt_bind ? gimple_bind_block (tgt_bind)
9796 : NULL_TREE);
9797 gsi_replace (gsi_p, bind, true);
9798 gimple_bind_add_seq (bind, ilist);
9799 gimple_bind_add_stmt (bind, stmt);
9800 gimple_bind_add_seq (bind, olist);
9801
9802 pop_gimplify_context (NULL);
9803 }
9804
9805 /* Lower code for an OpenMP teams directive. */
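/* The lowering evaluates the num_teams and thread_limit clause
   expressions (each defaulting to 0 when the clause is absent) and
   emits a call to the GOMP_TEAMS builtin with those two values before
   the teams body.  Hypothetical source-level illustration:

	#pragma omp teams num_teams(4)	-->  GOMP_teams (4, 0); <body>
  */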
9806
9807 static void
9808 lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9809 {
9810 gimple teams_stmt = gsi_stmt (*gsi_p);
9811 struct gimplify_ctx gctx;
9812 push_gimplify_context (&gctx);
9813
9814 tree block = make_node (BLOCK);
9815 gimple bind = gimple_build_bind (NULL, NULL, block);
9816 gsi_replace (gsi_p, bind, true);
9817 gimple_seq bind_body = NULL;
9818 gimple_seq dlist = NULL;
9819 gimple_seq olist = NULL;
9820
9821 tree num_teams = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
9822 OMP_CLAUSE_NUM_TEAMS);
9823 if (num_teams == NULL_TREE)
9824 num_teams = build_int_cst (unsigned_type_node, 0);
9825 else
9826 {
9827 num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
9828 num_teams = fold_convert (unsigned_type_node, num_teams);
9829 gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
9830 }
9831 tree thread_limit = find_omp_clause (gimple_omp_teams_clauses (teams_stmt),
9832 OMP_CLAUSE_THREAD_LIMIT);
9833 if (thread_limit == NULL_TREE)
9834 thread_limit = build_int_cst (unsigned_type_node, 0);
9835 else
9836 {
9837 thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
9838 thread_limit = fold_convert (unsigned_type_node, thread_limit);
9839 gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
9840 fb_rvalue);
9841 }
9842
9843 lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
9844 &bind_body, &dlist, ctx, NULL);
9845 lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
9846 lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist, ctx);
9847 gimple_seq_add_stmt (&bind_body, teams_stmt);
9848
9849 location_t loc = gimple_location (teams_stmt);
9850 tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
9851 gimple call = gimple_build_call (decl, 2, num_teams, thread_limit);
9852 gimple_set_location (call, loc);
9853 gimple_seq_add_stmt (&bind_body, call);
9854
9855 gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
9856 gimple_omp_set_body (teams_stmt, NULL);
9857 gimple_seq_add_seq (&bind_body, olist);
9858 gimple_seq_add_seq (&bind_body, dlist);
9859 gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
9860 gimple_bind_set_body (bind, bind_body);
9861
9862 pop_gimplify_context (bind);
9863
9864 gimple_bind_append_vars (bind, ctx->block_vars);
9865 BLOCK_VARS (block) = ctx->block_vars;
9866 if (BLOCK_VARS (block))
9867 TREE_USED (block) = 1;
9868 }
9869
9870
9871 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
9872 regimplified. If DATA is non-NULL, lower_omp_1 is outside
9873 of OpenMP context, but with task_shared_vars set. */
9874
9875 static tree
9876 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
9877 void *data)
9878 {
9879 tree t = *tp;
9880
9881 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
9882 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
9883 return t;
9884
9885 if (task_shared_vars
9886 && DECL_P (t)
9887 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
9888 return t;
9889
9890 /* If a global variable has been privatized, TREE_CONSTANT on
9891 ADDR_EXPR might be wrong. */
9892 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
9893 recompute_tree_invariant_for_addr_expr (t);
9894
9895 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
9896 return NULL_TREE;
9897 }
9898
9899 static void
9900 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
9901 {
9902 gimple stmt = gsi_stmt (*gsi_p);
9903 struct walk_stmt_info wi;
9904
9905 if (gimple_has_location (stmt))
9906 input_location = gimple_location (stmt);
9907
9908 if (task_shared_vars)
9909 memset (&wi, '\0', sizeof (wi));
9910
9911 /* If we have issued syntax errors, avoid doing any heavy lifting.
9912 Just replace the OpenMP directives with a NOP to avoid
9913 confusing RTL expansion. */
9914 if (seen_error () && is_gimple_omp (stmt))
9915 {
9916 gsi_replace (gsi_p, gimple_build_nop (), true);
9917 return;
9918 }
9919
9920 switch (gimple_code (stmt))
9921 {
9922 case GIMPLE_COND:
9923 if ((ctx || task_shared_vars)
9924 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
9925 ctx ? NULL : &wi, NULL)
9926 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
9927 ctx ? NULL : &wi, NULL)))
9928 gimple_regimplify_operands (stmt, gsi_p);
9929 break;
9930 case GIMPLE_CATCH:
9931 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
9932 break;
9933 case GIMPLE_EH_FILTER:
9934 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
9935 break;
9936 case GIMPLE_TRY:
9937 lower_omp (gimple_try_eval_ptr (stmt), ctx);
9938 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
9939 break;
9940 case GIMPLE_TRANSACTION:
9941 lower_omp (gimple_transaction_body_ptr (stmt), ctx);
9942 break;
9943 case GIMPLE_BIND:
9944 lower_omp (gimple_bind_body_ptr (stmt), ctx);
9945 break;
9946 case GIMPLE_OMP_PARALLEL:
9947 case GIMPLE_OMP_TASK:
9948 ctx = maybe_lookup_ctx (stmt);
9949 gcc_assert (ctx);
9950 if (ctx->cancellable)
9951 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
9952 lower_omp_taskreg (gsi_p, ctx);
9953 break;
9954 case GIMPLE_OMP_FOR:
9955 ctx = maybe_lookup_ctx (stmt);
9956 gcc_assert (ctx);
9957 if (ctx->cancellable)
9958 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
9959 lower_omp_for (gsi_p, ctx);
9960 break;
9961 case GIMPLE_OMP_SECTIONS:
9962 ctx = maybe_lookup_ctx (stmt);
9963 gcc_assert (ctx);
9964 if (ctx->cancellable)
9965 ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
9966 lower_omp_sections (gsi_p, ctx);
9967 break;
9968 case GIMPLE_OMP_SINGLE:
9969 ctx = maybe_lookup_ctx (stmt);
9970 gcc_assert (ctx);
9971 lower_omp_single (gsi_p, ctx);
9972 break;
9973 case GIMPLE_OMP_MASTER:
9974 ctx = maybe_lookup_ctx (stmt);
9975 gcc_assert (ctx);
9976 lower_omp_master (gsi_p, ctx);
9977 break;
9978 case GIMPLE_OMP_TASKGROUP:
9979 ctx = maybe_lookup_ctx (stmt);
9980 gcc_assert (ctx);
9981 lower_omp_taskgroup (gsi_p, ctx);
9982 break;
9983 case GIMPLE_OMP_ORDERED:
9984 ctx = maybe_lookup_ctx (stmt);
9985 gcc_assert (ctx);
9986 lower_omp_ordered (gsi_p, ctx);
9987 break;
9988 case GIMPLE_OMP_CRITICAL:
9989 ctx = maybe_lookup_ctx (stmt);
9990 gcc_assert (ctx);
9991 lower_omp_critical (gsi_p, ctx);
9992 break;
9993 case GIMPLE_OMP_ATOMIC_LOAD:
9994 if ((ctx || task_shared_vars)
9995 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
9996 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
9997 gimple_regimplify_operands (stmt, gsi_p);
9998 break;
9999 case GIMPLE_OMP_TARGET:
10000 ctx = maybe_lookup_ctx (stmt);
10001 gcc_assert (ctx);
10002 lower_omp_target (gsi_p, ctx);
10003 break;
10004 case GIMPLE_OMP_TEAMS:
10005 ctx = maybe_lookup_ctx (stmt);
10006 gcc_assert (ctx);
10007 lower_omp_teams (gsi_p, ctx);
10008 break;
10009 case GIMPLE_CALL:
10010 tree fndecl;
10011 fndecl = gimple_call_fndecl (stmt);
10012 if (fndecl
10013 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
10014 switch (DECL_FUNCTION_CODE (fndecl))
10015 {
10016 case BUILT_IN_GOMP_BARRIER:
10017 if (ctx == NULL)
10018 break;
10019 /* FALLTHRU */
10020 case BUILT_IN_GOMP_CANCEL:
10021 case BUILT_IN_GOMP_CANCELLATION_POINT:
10022 omp_context *cctx;
10023 cctx = ctx;
10024 if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
10025 cctx = cctx->outer;
10026 gcc_assert (gimple_call_lhs (stmt) == NULL_TREE);
10027 if (!cctx->cancellable)
10028 {
10029 if (DECL_FUNCTION_CODE (fndecl)
10030 == BUILT_IN_GOMP_CANCELLATION_POINT)
10031 {
10032 stmt = gimple_build_nop ();
10033 gsi_replace (gsi_p, stmt, false);
10034 }
10035 break;
10036 }
10037 tree lhs;
10038 lhs = create_tmp_var (boolean_type_node, NULL);
10039 if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
10040 {
10041 fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
10042 gimple_call_set_fndecl (stmt, fndecl);
10043 gimple_call_set_fntype (stmt, TREE_TYPE (fndecl));
10044 }
10045 gimple_call_set_lhs (stmt, lhs);
10046 tree fallthru_label;
10047 fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
10048 gimple g;
10049 g = gimple_build_label (fallthru_label);
10050 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10051 g = gimple_build_cond (NE_EXPR, lhs, boolean_false_node,
10052 cctx->cancel_label, fallthru_label);
10053 gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
10054 break;
10055 default:
10056 break;
10057 }
10058 /* FALLTHRU */
10059 default:
10060 if ((ctx || task_shared_vars)
10061 && walk_gimple_op (stmt, lower_omp_regimplify_p,
10062 ctx ? NULL : &wi))
10063 gimple_regimplify_operands (stmt, gsi_p);
10064 break;
10065 }
10066 }
10067
10068 static void
10069 lower_omp (gimple_seq *body, omp_context *ctx)
10070 {
10071 location_t saved_location = input_location;
10072 gimple_stmt_iterator gsi;
10073 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10074 lower_omp_1 (&gsi, ctx);
10075 /* Inside target region we haven't called fold_stmt during gimplification,
10076 because it can break code by adding decl references that weren't in the
10077 source. Call fold_stmt now. */
10078 if (target_nesting_level)
10079 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
10080 fold_stmt (&gsi);
10081 input_location = saved_location;
10082 }
10083 \f
10084 /* Main entry point. */
10085
10086 static unsigned int
10087 execute_lower_omp (void)
10088 {
10089 gimple_seq body;
10090
10091 /* This pass always runs, to provide PROP_gimple_lomp.
10092 But there is nothing to do unless -fopenmp, -fopenmp-simd or -fcilkplus is given. */
10093 if (flag_openmp == 0 && flag_openmp_simd == 0 && flag_enable_cilkplus == 0)
10094 return 0;
10095
10096 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
10097 delete_omp_context);
10098
10099 body = gimple_body (current_function_decl);
10100 scan_omp (&body, NULL);
10101 gcc_assert (taskreg_nesting_level == 0);
10102
10103 if (all_contexts->root)
10104 {
10105 struct gimplify_ctx gctx;
10106
10107 if (task_shared_vars)
10108 push_gimplify_context (&gctx);
10109 lower_omp (&body, NULL);
10110 if (task_shared_vars)
10111 pop_gimplify_context (NULL);
10112 }
10113
10114 if (all_contexts)
10115 {
10116 splay_tree_delete (all_contexts);
10117 all_contexts = NULL;
10118 }
10119 BITMAP_FREE (task_shared_vars);
10120 return 0;
10121 }
10122
10123 namespace {
10124
10125 const pass_data pass_data_lower_omp =
10126 {
10127 GIMPLE_PASS, /* type */
10128 "omplower", /* name */
10129 OPTGROUP_NONE, /* optinfo_flags */
10130 false, /* has_gate */
10131 true, /* has_execute */
10132 TV_NONE, /* tv_id */
10133 PROP_gimple_any, /* properties_required */
10134 PROP_gimple_lomp, /* properties_provided */
10135 0, /* properties_destroyed */
10136 0, /* todo_flags_start */
10137 0, /* todo_flags_finish */
10138 };
10139
10140 class pass_lower_omp : public gimple_opt_pass
10141 {
10142 public:
10143 pass_lower_omp (gcc::context *ctxt)
10144 : gimple_opt_pass (pass_data_lower_omp, ctxt)
10145 {}
10146
10147 /* opt_pass methods: */
10148 unsigned int execute () { return execute_lower_omp (); }
10149
10150 }; // class pass_lower_omp
10151
10152 } // anon namespace
10153
10154 gimple_opt_pass *
10155 make_pass_lower_omp (gcc::context *ctxt)
10156 {
10157 return new pass_lower_omp (ctxt);
10158 }
10159 \f
10160 /* The following is a utility to diagnose OpenMP structured block violations.
10161 It is not part of the "omplower" pass, as that's invoked too late. It
10162 should be invoked by the respective front ends after gimplification. */
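/* A minimal, purely illustrative example of what gets diagnosed:

	  goto inside;		<- invalid entry into the block
	#pragma omp parallel
	  {
	  inside:;
	  }

   Pass 1 records the enclosing construct of every label; pass 2 then
   compares each branch's construct with that of its destination. */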
10163
10164 static splay_tree all_labels;
10165
10166 /* Check for mismatched contexts and generate an error if needed. Return
10167 true if an error is detected. */
10168
10169 static bool
10170 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
10171 gimple branch_ctx, gimple label_ctx)
10172 {
10173 if (label_ctx == branch_ctx)
10174 return false;
10175
10176
10177 /*
10178 Previously we kept track of the label's entire context in diagnose_sb_[12]
10179 so we could traverse it and issue a correct "exit" or "enter" error
10180 message upon a structured block violation.
10181
10182 We built the context by building a list with tree_cons'ing, but there is
10183 no easy counterpart in gimple tuples. It seems like far too much work
10184 for issuing exit/enter error messages. If someone really misses the
10185 distinct error message... patches welcome.
10186 */
10187
10188 #if 0
10189 /* Try to avoid confusing the user by producing an error message
10190 with correct "exit" or "enter" verbiage. We prefer "exit"
10191 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
10192 if (branch_ctx == NULL)
10193 exit_p = false;
10194 else
10195 {
10196 while (label_ctx)
10197 {
10198 if (TREE_VALUE (label_ctx) == branch_ctx)
10199 {
10200 exit_p = false;
10201 break;
10202 }
10203 label_ctx = TREE_CHAIN (label_ctx);
10204 }
10205 }
10206
10207 if (exit_p)
10208 error ("invalid exit from OpenMP structured block");
10209 else
10210 error ("invalid entry to OpenMP structured block");
10211 #endif
10212
10213 bool cilkplus_block = false;
10214 if (flag_enable_cilkplus)
10215 {
10216 if ((branch_ctx
10217 && gimple_code (branch_ctx) == GIMPLE_OMP_FOR
10218 && gimple_omp_for_kind (branch_ctx) == GF_OMP_FOR_KIND_CILKSIMD)
10219 || (gimple_code (label_ctx) == GIMPLE_OMP_FOR
10220 && gimple_omp_for_kind (label_ctx) == GF_OMP_FOR_KIND_CILKSIMD))
10221 cilkplus_block = true;
10222 }
10223
10224 /* If it's obvious we have an invalid entry, be specific about the error. */
10225 if (branch_ctx == NULL)
10226 {
10227 if (cilkplus_block)
10228 error ("invalid entry to Cilk Plus structured block");
10229 else
10230 error ("invalid entry to OpenMP structured block");
10231 }
10232 else
10233 {
10234 /* Otherwise, be vague and lazy, but efficient. */
10235 if (cilkplus_block)
10236 error ("invalid branch to/from a Cilk Plus structured block");
10237 else
10238 error ("invalid branch to/from an OpenMP structured block");
10239 }
10240
10241 gsi_replace (gsi_p, gimple_build_nop (), false);
10242 return true;
10243 }
10244
10245 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
10246 where each label is found. */
10247
10248 static tree
10249 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10250 struct walk_stmt_info *wi)
10251 {
10252 gimple context = (gimple) wi->info;
10253 gimple inner_context;
10254 gimple stmt = gsi_stmt (*gsi_p);
10255
10256 *handled_ops_p = true;
10257
10258 switch (gimple_code (stmt))
10259 {
10260 WALK_SUBSTMTS;
10261
10262 case GIMPLE_OMP_PARALLEL:
10263 case GIMPLE_OMP_TASK:
10264 case GIMPLE_OMP_SECTIONS:
10265 case GIMPLE_OMP_SINGLE:
10266 case GIMPLE_OMP_SECTION:
10267 case GIMPLE_OMP_MASTER:
10268 case GIMPLE_OMP_ORDERED:
10269 case GIMPLE_OMP_CRITICAL:
10270 case GIMPLE_OMP_TARGET:
10271 case GIMPLE_OMP_TEAMS:
10272 case GIMPLE_OMP_TASKGROUP:
10273 /* The minimal context here is just the current OMP construct. */
10274 inner_context = stmt;
10275 wi->info = inner_context;
10276 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10277 wi->info = context;
10278 break;
10279
10280 case GIMPLE_OMP_FOR:
10281 inner_context = stmt;
10282 wi->info = inner_context;
10283 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10284 walk them. */
10285 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
10286 diagnose_sb_1, NULL, wi);
10287 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
10288 wi->info = context;
10289 break;
10290
10291 case GIMPLE_LABEL:
10292 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
10293 (splay_tree_value) context);
10294 break;
10295
10296 default:
10297 break;
10298 }
10299
10300 return NULL_TREE;
10301 }
10302
10303 /* Pass 2: Check each branch and see if its context differs from that of
10304 the destination label's context. */
10305
10306 static tree
10307 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
10308 struct walk_stmt_info *wi)
10309 {
10310 gimple context = (gimple) wi->info;
10311 splay_tree_node n;
10312 gimple stmt = gsi_stmt (*gsi_p);
10313
10314 *handled_ops_p = true;
10315
10316 switch (gimple_code (stmt))
10317 {
10318 WALK_SUBSTMTS;
10319
10320 case GIMPLE_OMP_PARALLEL:
10321 case GIMPLE_OMP_TASK:
10322 case GIMPLE_OMP_SECTIONS:
10323 case GIMPLE_OMP_SINGLE:
10324 case GIMPLE_OMP_SECTION:
10325 case GIMPLE_OMP_MASTER:
10326 case GIMPLE_OMP_ORDERED:
10327 case GIMPLE_OMP_CRITICAL:
10328 case GIMPLE_OMP_TARGET:
10329 case GIMPLE_OMP_TEAMS:
10330 case GIMPLE_OMP_TASKGROUP:
10331 wi->info = stmt;
10332 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10333 wi->info = context;
10334 break;
10335
10336 case GIMPLE_OMP_FOR:
10337 wi->info = stmt;
10338 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
10339 walk them. */
10340 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
10341 diagnose_sb_2, NULL, wi);
10342 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
10343 wi->info = context;
10344 break;
10345
10346 case GIMPLE_COND:
10347 {
10348 tree lab = gimple_cond_true_label (stmt);
10349 if (lab)
10350 {
10351 n = splay_tree_lookup (all_labels,
10352 (splay_tree_key) lab);
10353 diagnose_sb_0 (gsi_p, context,
10354 n ? (gimple) n->value : NULL);
10355 }
10356 lab = gimple_cond_false_label (stmt);
10357 if (lab)
10358 {
10359 n = splay_tree_lookup (all_labels,
10360 (splay_tree_key) lab);
10361 diagnose_sb_0 (gsi_p, context,
10362 n ? (gimple) n->value : NULL);
10363 }
10364 }
10365 break;
10366
10367 case GIMPLE_GOTO:
10368 {
10369 tree lab = gimple_goto_dest (stmt);
10370 if (TREE_CODE (lab) != LABEL_DECL)
10371 break;
10372
10373 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10374 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
10375 }
10376 break;
10377
10378 case GIMPLE_SWITCH:
10379 {
10380 unsigned int i;
10381 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
10382 {
10383 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
10384 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
10385 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
10386 break;
10387 }
10388 }
10389 break;
10390
10391 case GIMPLE_RETURN:
10392 diagnose_sb_0 (gsi_p, context, NULL);
10393 break;
10394
10395 default:
10396 break;
10397 }
10398
10399 return NULL_TREE;
10400 }
10401
10402 /* Called from tree-cfg.c::make_edges to create cfg edges for all GIMPLE_OMP
10403 codes. */
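/* Illustrative note: for a worksharing loop region the interesting
   edges are built in the GIMPLE_OMP_CONTINUE case below: an abnormal
   loopback edge from the continue block to the first body block, plus
   an abnormal edge from the GIMPLE_OMP_FOR block past the body for the
   case where no iteration is executed. */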
10404 bool
10405 make_gimple_omp_edges (basic_block bb, struct omp_region **region)
10406 {
10407 gimple last = last_stmt (bb);
10408 enum gimple_code code = gimple_code (last);
10409 struct omp_region *cur_region = *region;
10410 bool fallthru = false;
10411
10412 switch (code)
10413 {
10414 case GIMPLE_OMP_PARALLEL:
10415 case GIMPLE_OMP_TASK:
10416 case GIMPLE_OMP_FOR:
10417 case GIMPLE_OMP_SINGLE:
10418 case GIMPLE_OMP_TEAMS:
10419 case GIMPLE_OMP_MASTER:
10420 case GIMPLE_OMP_TASKGROUP:
10421 case GIMPLE_OMP_ORDERED:
10422 case GIMPLE_OMP_CRITICAL:
10423 case GIMPLE_OMP_SECTION:
10424 cur_region = new_omp_region (bb, code, cur_region);
10425 fallthru = true;
10426 break;
10427
10428 case GIMPLE_OMP_TARGET:
10429 cur_region = new_omp_region (bb, code, cur_region);
10430 fallthru = true;
10431 if (gimple_omp_target_kind (last) == GF_OMP_TARGET_KIND_UPDATE)
10432 cur_region = cur_region->outer;
10433 break;
10434
10435 case GIMPLE_OMP_SECTIONS:
10436 cur_region = new_omp_region (bb, code, cur_region);
10437 fallthru = true;
10438 break;
10439
10440 case GIMPLE_OMP_SECTIONS_SWITCH:
10441 fallthru = false;
10442 break;
10443
10444 case GIMPLE_OMP_ATOMIC_LOAD:
10445 case GIMPLE_OMP_ATOMIC_STORE:
10446 fallthru = true;
10447 break;
10448
10449 case GIMPLE_OMP_RETURN:
10450 /* In the case of a GIMPLE_OMP_SECTION, the edge will go
10451 somewhere other than the next block. This will be
10452 created later. */
10453 cur_region->exit = bb;
10454 fallthru = cur_region->type != GIMPLE_OMP_SECTION;
10455 cur_region = cur_region->outer;
10456 break;
10457
10458 case GIMPLE_OMP_CONTINUE:
10459 cur_region->cont = bb;
10460 switch (cur_region->type)
10461 {
10462 case GIMPLE_OMP_FOR:
10463 /* Mark all GIMPLE_OMP_FOR and GIMPLE_OMP_CONTINUE
10464 succs edges as abnormal to prevent splitting
10465 them. */
10466 single_succ_edge (cur_region->entry)->flags |= EDGE_ABNORMAL;
10467 /* Make the loopback edge. */
10468 make_edge (bb, single_succ (cur_region->entry),
10469 EDGE_ABNORMAL);
10470
10471 /* Create an edge from GIMPLE_OMP_FOR to exit, which
10472 corresponds to the case that the body of the loop
10473 is not executed at all. */
10474 make_edge (cur_region->entry, bb->next_bb, EDGE_ABNORMAL);
10475 make_edge (bb, bb->next_bb, EDGE_FALLTHRU | EDGE_ABNORMAL);
10476 fallthru = false;
10477 break;
10478
10479 case GIMPLE_OMP_SECTIONS:
10480 /* Wire up the edges into and out of the nested sections. */
10481 {
10482 basic_block switch_bb = single_succ (cur_region->entry);
10483
10484 struct omp_region *i;
10485 for (i = cur_region->inner; i ; i = i->next)
10486 {
10487 gcc_assert (i->type == GIMPLE_OMP_SECTION);
10488 make_edge (switch_bb, i->entry, 0);
10489 make_edge (i->exit, bb, EDGE_FALLTHRU);
10490 }
10491
10492 /* Make the loopback edge to the block with
10493 GIMPLE_OMP_SECTIONS_SWITCH. */
10494 make_edge (bb, switch_bb, 0);
10495
10496 /* Make the edge from the switch to exit. */
10497 make_edge (switch_bb, bb->next_bb, 0);
10498 fallthru = false;
10499 }
10500 break;
10501
10502 default:
10503 gcc_unreachable ();
10504 }
10505 break;
10506
10507 default:
10508 gcc_unreachable ();
10509 }
10510
10511 if (*region != cur_region)
10512 *region = cur_region;
10513
10514 return fallthru;
10515 }
10516
10517 static unsigned int
10518 diagnose_omp_structured_block_errors (void)
10519 {
10520 struct walk_stmt_info wi;
10521 gimple_seq body = gimple_body (current_function_decl);
10522
10523 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
10524
10525 memset (&wi, 0, sizeof (wi));
10526 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
10527
10528 memset (&wi, 0, sizeof (wi));
10529 wi.want_locations = true;
10530 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
10531
10532 gimple_set_body (current_function_decl, body);
10533
10534 splay_tree_delete (all_labels);
10535 all_labels = NULL;
10536
10537 return 0;
10538 }
10539
10540 static bool
10541 gate_diagnose_omp_blocks (void)
10542 {
10543 return flag_openmp || flag_enable_cilkplus;
10544 }
10545
10546 namespace {
10547
10548 const pass_data pass_data_diagnose_omp_blocks =
10549 {
10550 GIMPLE_PASS, /* type */
10551 "*diagnose_omp_blocks", /* name */
10552 OPTGROUP_NONE, /* optinfo_flags */
10553 true, /* has_gate */
10554 true, /* has_execute */
10555 TV_NONE, /* tv_id */
10556 PROP_gimple_any, /* properties_required */
10557 0, /* properties_provided */
10558 0, /* properties_destroyed */
10559 0, /* todo_flags_start */
10560 0, /* todo_flags_finish */
10561 };
10562
10563 class pass_diagnose_omp_blocks : public gimple_opt_pass
10564 {
10565 public:
10566 pass_diagnose_omp_blocks (gcc::context *ctxt)
10567 : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
10568 {}
10569
10570 /* opt_pass methods: */
10571 bool gate () { return gate_diagnose_omp_blocks (); }
10572 unsigned int execute () {
10573 return diagnose_omp_structured_block_errors ();
10574 }
10575
10576 }; // class pass_diagnose_omp_blocks
10577
10578 } // anon namespace
10579
10580 gimple_opt_pass *
10581 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
10582 {
10583 return new pass_diagnose_omp_blocks (ctxt);
10584 }
10585
10586 #include "gt-omp-low.h"