1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005-2013 Free Software Foundation, Inc.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "gimple.h"
31 #include "tree-iterator.h"
32 #include "tree-inline.h"
33 #include "langhooks.h"
34 #include "diagnostic-core.h"
35 #include "tree-flow.h"
36 #include "flags.h"
37 #include "function.h"
38 #include "expr.h"
39 #include "tree-pass.h"
40 #include "ggc.h"
41 #include "except.h"
42 #include "splay-tree.h"
43 #include "optabs.h"
44 #include "cfgloop.h"
45 #include "target.h"
46
47
48 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
49 phases. The first phase scans the function looking for OMP statements
50 and then for variables that must be replaced to satisfy data sharing
51 clauses. The second phase expands code for the constructs, as well as
52 re-gimplifying things when variables have been replaced with complex
53 expressions.
54
55 Final code generation is done by pass_expand_omp. The flowgraph is
56 scanned for parallel regions which are then moved to a new
57 function, to be invoked by the thread library. */
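
/* As a rough illustration (record layout, generated names and library
   entry points simplified): a directive such as

       #pragma omp parallel shared (n)
       body (n);

   has its body outlined into a child function (named along the lines
   of foo._omp_fn.0) taking a single .omp_data_i pointer, shared data
   is marshalled through a .omp_data_s record, and the directive is
   replaced by libgomp calls roughly like

       struct .omp_data_s { int n; } .omp_data_o;
       .omp_data_o.n = n;
       GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
       foo._omp_fn.0 (&.omp_data_o);
       GOMP_parallel_end ();

   The exact record layout and entry points depend on the clauses
   used.  */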
58
59 /* Context structure. Used to store information about each parallel
60 directive in the code. */
61
62 typedef struct omp_context
63 {
64 /* This field must be at the beginning, as we do "inheritance": Some
65 callback functions for tree-inline.c (e.g., omp_copy_decl)
66 receive a copy_body_data pointer that is up-casted to an
67 omp_context pointer. */
68 copy_body_data cb;
69
70 /* The tree of contexts corresponding to the encountered constructs. */
71 struct omp_context *outer;
72 gimple stmt;
73
74 /* Map variables to fields in a structure that allows communication
75 between sending and receiving threads. */
76 splay_tree field_map;
77 tree record_type;
78 tree sender_decl;
79 tree receiver_decl;
80
81 /* These are used just by task contexts, if the task firstprivate fn is
82 needed. srecord_type is used to communicate from the thread that
83 encountered the task construct to the task firstprivate fn;
84 record_type is allocated by GOMP_task, initialized by the task
85 firstprivate fn and passed to the task body fn. */

86 splay_tree sfield_map;
87 tree srecord_type;
88
89 /* A chain of variables to add to the top-level block surrounding the
90 construct. In the case of a parallel, this is in the child function. */
91 tree block_vars;
92
93 /* What to do with variables with implicitly determined sharing
94 attributes. */
95 enum omp_clause_default_kind default_kind;
96
97 /* Nesting depth of this context. Used to beautify error messages re
98 invalid gotos. The outermost ctx is depth 1, with depth 0 being
99 reserved for the main body of the function. */
100 int depth;
101
102 /* True if this parallel directive is nested within another. */
103 bool is_nested;
104 } omp_context;
105
106
107 struct omp_for_data_loop
108 {
109 tree v, n1, n2, step;
110 enum tree_code cond_code;
111 };
112
113 /* A structure describing the main elements of a parallel loop. */
114
115 struct omp_for_data
116 {
117 struct omp_for_data_loop loop;
118 tree chunk_size;
119 gimple for_stmt;
120 tree pre, iter_type;
121 int collapse;
122 bool have_nowait, have_ordered;
123 enum omp_clause_schedule_kind sched_kind;
124 struct omp_for_data_loop *loops;
125 };
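
/* As a rough illustration of how these fields are filled in: for a
   loop such as

       #pragma omp for schedule (dynamic, 4)
       for (i = 0; i < n; i += 2)
         ...

   extract_omp_for_data below records loop.v = i, loop.n1 = 0,
   loop.n2 = n, loop.step = 2, loop.cond_code = LT_EXPR,
   sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC, chunk_size = 4 and
   collapse = 1.  */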
126
127
128 static splay_tree all_contexts;
129 static int taskreg_nesting_level;
130 struct omp_region *root_omp_region;
131 static bitmap task_shared_vars;
132
133 static void scan_omp (gimple_seq *, omp_context *);
134 static tree scan_omp_1_op (tree *, int *, void *);
135
136 #define WALK_SUBSTMTS \
137 case GIMPLE_BIND: \
138 case GIMPLE_TRY: \
139 case GIMPLE_CATCH: \
140 case GIMPLE_EH_FILTER: \
141 case GIMPLE_TRANSACTION: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
145
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
147
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
150 {
151 struct walk_stmt_info wi;
152
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
156
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
158 }
159
160 static void lower_omp (gimple_seq *, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
163
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
165
166 tree
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
168 {
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
171 return clauses;
172
173 return NULL_TREE;
174 }
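
/* Typical use, as elsewhere in this file (sketch only; KIND is a
   hypothetical local):

       tree c = find_omp_clause (gimple_omp_for_clauses (stmt),
                                 OMP_CLAUSE_SCHEDULE);
       if (c)
         kind = OMP_CLAUSE_SCHEDULE_KIND (c);  */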
175
176 /* Return true if CTX is for an omp parallel. */
177
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
180 {
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
182 }
183
184
185 /* Return true if CTX is for an omp task. */
186
187 static inline bool
188 is_task_ctx (omp_context *ctx)
189 {
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
191 }
192
193
194 /* Return true if CTX is for an omp parallel or omp task. */
195
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
198 {
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
201 }
202
203
204 /* Return true if REGION is a combined parallel+workshare region. */
205
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
208 {
209 return region->is_combined_parallel;
210 }
211
212
213 /* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
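
/* A sketch of the normalization performed below: i <= n2 is rewritten
   as i < n2 + 1 and i >= n2 as i > n2 - 1, so only LT_EXPR and
   GT_EXPR survive.  For a collapsed loop nest the total iteration
   count is accumulated per loop, roughly as

       count *= (n2 - n1 + step + (cond_code == LT_EXPR ? -1 : 1))
                / step;

   evaluated in the iterator type and then widened to unsigned long
   long.  */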
215
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
219 {
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
225 location_t loc = gimple_location (for_stmt);
226 bool simd = gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_SIMD;
227
228 fd->for_stmt = for_stmt;
229 fd->pre = NULL;
230 fd->collapse = gimple_omp_for_collapse (for_stmt);
231 if (fd->collapse > 1)
232 fd->loops = loops;
233 else
234 fd->loops = &fd->loop;
235
236 fd->have_nowait = fd->have_ordered = false;
237 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
238 fd->chunk_size = NULL_TREE;
239 collapse_iter = NULL;
240 collapse_count = NULL;
241
242 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
243 switch (OMP_CLAUSE_CODE (t))
244 {
245 case OMP_CLAUSE_NOWAIT:
246 fd->have_nowait = true;
247 break;
248 case OMP_CLAUSE_ORDERED:
249 fd->have_ordered = true;
250 break;
251 case OMP_CLAUSE_SCHEDULE:
252 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
253 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
254 break;
255 case OMP_CLAUSE_COLLAPSE:
256 if (fd->collapse > 1)
257 {
258 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
259 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
260 }
261 default:
262 break;
263 }
264
265 /* FIXME: for now map schedule(auto) to schedule(static).
266 There should be analysis to determine whether all iterations
267 are approximately the same amount of work (then schedule(static)
268 is best) or if it varies (then schedule(dynamic,N) is better). */
269 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
270 {
271 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
272 gcc_assert (fd->chunk_size == NULL);
273 }
274 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
275 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
276 gcc_assert (fd->chunk_size == NULL);
277 else if (fd->chunk_size == NULL)
278 {
279 /* We only need to compute a default chunk size for ordered
280 static loops and dynamic loops. */
281 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
282 || fd->have_ordered
283 || fd->collapse > 1)
284 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
285 ? integer_zero_node : integer_one_node;
286 }
287
288 for (i = 0; i < fd->collapse; i++)
289 {
290 if (fd->collapse == 1)
291 loop = &fd->loop;
292 else if (loops != NULL)
293 loop = loops + i;
294 else
295 loop = &dummy_loop;
296
297
298 loop->v = gimple_omp_for_index (for_stmt, i);
299 gcc_assert (SSA_VAR_P (loop->v));
300 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
301 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
302 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
303 loop->n1 = gimple_omp_for_initial (for_stmt, i);
304
305 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
306 loop->n2 = gimple_omp_for_final (for_stmt, i);
307 switch (loop->cond_code)
308 {
309 case LT_EXPR:
310 case GT_EXPR:
311 break;
312 case LE_EXPR:
313 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
314 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
315 else
316 loop->n2 = fold_build2_loc (loc,
317 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
318 build_int_cst (TREE_TYPE (loop->n2), 1));
319 loop->cond_code = LT_EXPR;
320 break;
321 case GE_EXPR:
322 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
323 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
324 else
325 loop->n2 = fold_build2_loc (loc,
326 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
327 build_int_cst (TREE_TYPE (loop->n2), 1));
328 loop->cond_code = GT_EXPR;
329 break;
330 default:
331 gcc_unreachable ();
332 }
333
334 t = gimple_omp_for_incr (for_stmt, i);
335 gcc_assert (TREE_OPERAND (t, 0) == var);
336 switch (TREE_CODE (t))
337 {
338 case PLUS_EXPR:
339 loop->step = TREE_OPERAND (t, 1);
340 break;
341 case POINTER_PLUS_EXPR:
342 loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
343 break;
344 case MINUS_EXPR:
345 loop->step = TREE_OPERAND (t, 1);
346 loop->step = fold_build1_loc (loc,
347 NEGATE_EXPR, TREE_TYPE (loop->step),
348 loop->step);
349 break;
350 default:
351 gcc_unreachable ();
352 }
353
354 if (simd)
355 {
356 if (fd->collapse == 1)
357 iter_type = TREE_TYPE (loop->v);
358 else if (i == 0
359 || TYPE_PRECISION (iter_type)
360 < TYPE_PRECISION (TREE_TYPE (loop->v)))
361 iter_type
362 = build_nonstandard_integer_type
363 (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
364 }
365 else if (iter_type != long_long_unsigned_type_node)
366 {
367 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
368 iter_type = long_long_unsigned_type_node;
369 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
370 && TYPE_PRECISION (TREE_TYPE (loop->v))
371 >= TYPE_PRECISION (iter_type))
372 {
373 tree n;
374
375 if (loop->cond_code == LT_EXPR)
376 n = fold_build2_loc (loc,
377 PLUS_EXPR, TREE_TYPE (loop->v),
378 loop->n2, loop->step);
379 else
380 n = loop->n1;
381 if (TREE_CODE (n) != INTEGER_CST
382 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
383 iter_type = long_long_unsigned_type_node;
384 }
385 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
386 > TYPE_PRECISION (iter_type))
387 {
388 tree n1, n2;
389
390 if (loop->cond_code == LT_EXPR)
391 {
392 n1 = loop->n1;
393 n2 = fold_build2_loc (loc,
394 PLUS_EXPR, TREE_TYPE (loop->v),
395 loop->n2, loop->step);
396 }
397 else
398 {
399 n1 = fold_build2_loc (loc,
400 MINUS_EXPR, TREE_TYPE (loop->v),
401 loop->n2, loop->step);
402 n2 = loop->n1;
403 }
404 if (TREE_CODE (n1) != INTEGER_CST
405 || TREE_CODE (n2) != INTEGER_CST
406 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
407 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
408 iter_type = long_long_unsigned_type_node;
409 }
410 }
411
412 if (collapse_count && *collapse_count == NULL)
413 {
414 t = fold_binary (loop->cond_code, boolean_type_node,
415 fold_convert (TREE_TYPE (loop->v), loop->n1),
416 fold_convert (TREE_TYPE (loop->v), loop->n2));
417 if (t && integer_zerop (t))
418 count = build_zero_cst (long_long_unsigned_type_node);
419 else if ((i == 0 || count != NULL_TREE)
420 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
421 && TREE_CONSTANT (loop->n1)
422 && TREE_CONSTANT (loop->n2)
423 && TREE_CODE (loop->step) == INTEGER_CST)
424 {
425 tree itype = TREE_TYPE (loop->v);
426
427 if (POINTER_TYPE_P (itype))
428 itype = signed_type_for (itype);
429 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
430 t = fold_build2_loc (loc,
431 PLUS_EXPR, itype,
432 fold_convert_loc (loc, itype, loop->step), t);
433 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
434 fold_convert_loc (loc, itype, loop->n2));
435 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
436 fold_convert_loc (loc, itype, loop->n1));
437 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
438 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
439 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
440 fold_build1_loc (loc, NEGATE_EXPR, itype,
441 fold_convert_loc (loc, itype,
442 loop->step)));
443 else
444 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
445 fold_convert_loc (loc, itype, loop->step));
446 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
447 if (count != NULL_TREE)
448 count = fold_build2_loc (loc,
449 MULT_EXPR, long_long_unsigned_type_node,
450 count, t);
451 else
452 count = t;
453 if (TREE_CODE (count) != INTEGER_CST)
454 count = NULL_TREE;
455 }
456 else if (count && !integer_zerop (count))
457 count = NULL_TREE;
458 }
459 }
460
461 if (count
462 && !simd)
463 {
464 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
465 iter_type = long_long_unsigned_type_node;
466 else
467 iter_type = long_integer_type_node;
468 }
469 else if (collapse_iter && *collapse_iter != NULL)
470 iter_type = TREE_TYPE (*collapse_iter);
471 fd->iter_type = iter_type;
472 if (collapse_iter && *collapse_iter == NULL)
473 *collapse_iter = create_tmp_var (iter_type, ".iter");
474 if (collapse_count && *collapse_count == NULL)
475 {
476 if (count)
477 *collapse_count = fold_convert_loc (loc, iter_type, count);
478 else
479 *collapse_count = create_tmp_var (iter_type, ".count");
480 }
481
482 if (fd->collapse > 1)
483 {
484 fd->loop.v = *collapse_iter;
485 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
486 fd->loop.n2 = *collapse_count;
487 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
488 fd->loop.cond_code = LT_EXPR;
489 }
490 }
491
492
493 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
494 is the immediate dominator of PAR_ENTRY_BB, return true if there
495 are no data dependencies that would prevent expanding the parallel
496 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
497
498 When expanding a combined parallel+workshare region, the call to
499 the child function may need additional arguments in the case of
500 GIMPLE_OMP_FOR regions. In some cases, these arguments are
501 computed out of variables passed in from the parent to the child
502 via 'struct .omp_data_s'. For instance:
503
504 #pragma omp parallel for schedule (guided, i * 4)
505 for (j ...)
506
507 Is lowered into:
508
509 # BLOCK 2 (PAR_ENTRY_BB)
510 .omp_data_o.i = i;
511 #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)
512
513 # BLOCK 3 (WS_ENTRY_BB)
514 .omp_data_i = &.omp_data_o;
515 D.1667 = .omp_data_i->i;
516 D.1598 = D.1667 * 4;
517 #pragma omp for schedule (guided, D.1598)
518
519 When we outline the parallel region, the call to the child function
520 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
521 that value is computed *after* the call site. So, in principle we
522 cannot do the transformation.
523
524 To see whether the code in WS_ENTRY_BB blocks the combined
525 parallel+workshare call, we collect all the variables used in the
526 GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
527 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
528 call.
529
530 FIXME. If we had the SSA form built at this point, we could merely
531 hoist the code in block 3 into block 2 and be done with it. But at
532 this point we don't have dataflow information and though we could
533 hack something up here, it is really not worth the aggravation. */
534
535 static bool
536 workshare_safe_to_combine_p (basic_block ws_entry_bb)
537 {
538 struct omp_for_data fd;
539 gimple ws_stmt = last_stmt (ws_entry_bb);
540
541 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
542 return true;
543
544 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
545
546 extract_omp_for_data (ws_stmt, &fd, NULL);
547
548 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
549 return false;
550 if (fd.iter_type != long_integer_type_node)
551 return false;
552
553 /* FIXME. We give up too easily here. If any of these arguments
554 are not constants, they will likely involve variables that have
555 been mapped into fields of .omp_data_s for sharing with the child
556 function. With appropriate data flow, it would be possible to
557 see through this. */
558 if (!is_gimple_min_invariant (fd.loop.n1)
559 || !is_gimple_min_invariant (fd.loop.n2)
560 || !is_gimple_min_invariant (fd.loop.step)
561 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
562 return false;
563
564 return true;
565 }
566
567
568 /* Collect additional arguments needed to emit a combined
569 parallel+workshare call. WS_STMT is the workshare directive being
570 expanded. */
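
/* For a combined parallel loop these values are appended to the usual
   (fn, data, num_threads) arguments of the libgomp entry point, e.g.
   (approximately)

       GOMP_parallel_loop_dynamic_start (fn, data, num_threads,
                                         start, end, incr, chunk);

   which is why N1, N2, STEP and the optional chunk size are converted
   to long below; for sections only the section count is needed.  */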
571
572 static vec<tree, va_gc> *
573 get_ws_args_for (gimple ws_stmt)
574 {
575 tree t;
576 location_t loc = gimple_location (ws_stmt);
577 vec<tree, va_gc> *ws_args;
578
579 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
580 {
581 struct omp_for_data fd;
582
583 extract_omp_for_data (ws_stmt, &fd, NULL);
584
585 vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
586
587 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
588 ws_args->quick_push (t);
589
590 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
591 ws_args->quick_push (t);
592
593 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
594 ws_args->quick_push (t);
595
596 if (fd.chunk_size)
597 {
598 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
599 ws_args->quick_push (t);
600 }
601
602 return ws_args;
603 }
604 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
605 {
606 /* Number of sections is equal to the number of edges from the
607 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
608 the exit of the sections region. */
609 basic_block bb = single_succ (gimple_bb (ws_stmt));
610 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
611 vec_alloc (ws_args, 1);
612 ws_args->quick_push (t);
613 return ws_args;
614 }
615
616 gcc_unreachable ();
617 }
618
619
620 /* Discover whether REGION is a combined parallel+workshare region. */
621
622 static void
623 determine_parallel_type (struct omp_region *region)
624 {
625 basic_block par_entry_bb, par_exit_bb;
626 basic_block ws_entry_bb, ws_exit_bb;
627
628 if (region == NULL || region->inner == NULL
629 || region->exit == NULL || region->inner->exit == NULL
630 || region->inner->cont == NULL)
631 return;
632
633 /* We only support parallel+for and parallel+sections. */
634 if (region->type != GIMPLE_OMP_PARALLEL
635 || (region->inner->type != GIMPLE_OMP_FOR
636 && region->inner->type != GIMPLE_OMP_SECTIONS))
637 return;
638
639 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
640 WS_EXIT_BB -> PAR_EXIT_BB. */
641 par_entry_bb = region->entry;
642 par_exit_bb = region->exit;
643 ws_entry_bb = region->inner->entry;
644 ws_exit_bb = region->inner->exit;
645
646 if (single_succ (par_entry_bb) == ws_entry_bb
647 && single_succ (ws_exit_bb) == par_exit_bb
648 && workshare_safe_to_combine_p (ws_entry_bb)
649 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
650 || (last_and_only_stmt (ws_entry_bb)
651 && last_and_only_stmt (par_exit_bb))))
652 {
653 gimple ws_stmt = last_stmt (ws_entry_bb);
654
655 if (region->inner->type == GIMPLE_OMP_FOR)
656 {
657 /* If this is a combined parallel loop, we need to determine
658 whether or not to use the combined library calls. There
659 are two cases where we do not apply the transformation:
660 static loops and any kind of ordered loop. In the first
661 case, we already open code the loop so there is no need
662 to do anything else. In the latter case, the combined
663 parallel loop call would still need extra synchronization
664 to implement ordered semantics, so there would not be any
665 gain in using the combined call. */
666 tree clauses = gimple_omp_for_clauses (ws_stmt);
667 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
668 if (c == NULL
669 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
670 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
671 {
672 region->is_combined_parallel = false;
673 region->inner->is_combined_parallel = false;
674 return;
675 }
676 }
677
678 region->is_combined_parallel = true;
679 region->inner->is_combined_parallel = true;
680 region->ws_args = get_ws_args_for (ws_stmt);
681 }
682 }
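
/* For example, under the rule above

       #pragma omp parallel for schedule (dynamic)
       for (i = 0; i < n; i++)
         ...

   is treated as a combined region and expanded through a single
   combined library call, while schedule (static) or an ordered clause
   keeps the separate parallel + workshare expansion.  (Sketch; the
   exact entry point depends on the schedule kind.)  */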
683
684
685 /* Return true if EXPR is variable sized. */
686
687 static inline bool
688 is_variable_sized (const_tree expr)
689 {
690 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
691 }
692
693 /* Return true if DECL is a reference type. */
694
695 static inline bool
696 is_reference (tree decl)
697 {
698 return lang_hooks.decls.omp_privatize_by_reference (decl);
699 }
700
701 /* Look up variables in the decl or field splay trees. The "maybe" form
702 allows the variable not to have been entered; otherwise we assert
703 that it must have been entered. */
704
705 static inline tree
706 lookup_decl (tree var, omp_context *ctx)
707 {
708 tree *n;
709 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
710 return *n;
711 }
712
713 static inline tree
714 maybe_lookup_decl (const_tree var, omp_context *ctx)
715 {
716 tree *n;
717 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
718 return n ? *n : NULL_TREE;
719 }
720
721 static inline tree
722 lookup_field (tree var, omp_context *ctx)
723 {
724 splay_tree_node n;
725 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
726 return (tree) n->value;
727 }
728
729 static inline tree
730 lookup_sfield (tree var, omp_context *ctx)
731 {
732 splay_tree_node n;
733 n = splay_tree_lookup (ctx->sfield_map
734 ? ctx->sfield_map : ctx->field_map,
735 (splay_tree_key) var);
736 return (tree) n->value;
737 }
738
739 static inline tree
740 maybe_lookup_field (tree var, omp_context *ctx)
741 {
742 splay_tree_node n;
743 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
744 return n ? (tree) n->value : NULL_TREE;
745 }
746
747 /* Return true if DECL should be copied by pointer. SHARED_CTX is
748 the parallel context if DECL is to be shared. */
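
/* For illustration: given

       struct S s;  int i;
       #pragma omp parallel shared (s, i)

   S is an aggregate and is always passed to the child by pointer,
   while I may use copy-in/copy-out through the .omp_data_s record as
   long as it is not addressable and not accessible from an outer
   scope.  */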
749
750 static bool
751 use_pointer_for_field (tree decl, omp_context *shared_ctx)
752 {
753 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
754 return true;
755
756 /* We can only use copy-in/copy-out semantics for shared variables
757 when we know the value is not accessible from an outer scope. */
758 if (shared_ctx)
759 {
760 /* ??? Trivially accessible from anywhere. But why would we even
761 be passing an address in this case? Should we simply assert
762 this to be false, or should we have a cleanup pass that removes
763 these from the list of mappings? */
764 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
765 return true;
766
767 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
768 without analyzing the expression whether or not its location
769 is accessible to anyone else. In the case of nested parallel
770 regions it certainly may be. */
771 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
772 return true;
773
774 /* Do not use copy-in/copy-out for variables that have their
775 address taken. */
776 if (TREE_ADDRESSABLE (decl))
777 return true;
778
779 /* lower_send_shared_vars only uses copy-in, but not copy-out
780 for these. */
781 if (TREE_READONLY (decl)
782 || ((TREE_CODE (decl) == RESULT_DECL
783 || TREE_CODE (decl) == PARM_DECL)
784 && DECL_BY_REFERENCE (decl)))
785 return false;
786
787 /* Disallow copy-in/out in nested parallel if
788 decl is shared in outer parallel, otherwise
789 each thread could store the shared variable
790 in its own copy-in location, making the
791 variable no longer really shared. */
792 if (shared_ctx->is_nested)
793 {
794 omp_context *up;
795
796 for (up = shared_ctx->outer; up; up = up->outer)
797 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
798 break;
799
800 if (up)
801 {
802 tree c;
803
804 for (c = gimple_omp_taskreg_clauses (up->stmt);
805 c; c = OMP_CLAUSE_CHAIN (c))
806 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
807 && OMP_CLAUSE_DECL (c) == decl)
808 break;
809
810 if (c)
811 goto maybe_mark_addressable_and_ret;
812 }
813 }
814
815 /* For tasks avoid using copy-in/out. As tasks can be
816 deferred or executed in a different thread, when GOMP_task
817 returns, the task hasn't necessarily terminated. */
818 if (is_task_ctx (shared_ctx))
819 {
820 tree outer;
821 maybe_mark_addressable_and_ret:
822 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
823 if (is_gimple_reg (outer))
824 {
825 /* Taking address of OUTER in lower_send_shared_vars
826 might need regimplification of everything that uses the
827 variable. */
828 if (!task_shared_vars)
829 task_shared_vars = BITMAP_ALLOC (NULL);
830 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
831 TREE_ADDRESSABLE (outer) = 1;
832 }
833 return true;
834 }
835 }
836
837 return false;
838 }
839
840 /* Create a new VAR_DECL and copy information from VAR to it. */
841
842 tree
843 copy_var_decl (tree var, tree name, tree type)
844 {
845 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
846
847 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
848 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
849 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
850 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
851 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
852 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
853 TREE_USED (copy) = 1;
854 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
855
856 return copy;
857 }
858
859 /* Construct a new automatic decl similar to VAR. */
860
861 static tree
862 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
863 {
864 tree copy = copy_var_decl (var, name, type);
865
866 DECL_CONTEXT (copy) = current_function_decl;
867 DECL_CHAIN (copy) = ctx->block_vars;
868 ctx->block_vars = copy;
869
870 return copy;
871 }
872
873 static tree
874 omp_copy_decl_1 (tree var, omp_context *ctx)
875 {
876 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
877 }
878
879 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
880 as appropriate. */
881 static tree
882 omp_build_component_ref (tree obj, tree field)
883 {
884 tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
885 if (TREE_THIS_VOLATILE (field))
886 TREE_THIS_VOLATILE (ret) |= 1;
887 if (TREE_READONLY (field))
888 TREE_READONLY (ret) |= 1;
889 return ret;
890 }
891
892 /* Build tree nodes to access the field for VAR on the receiver side. */
893
894 static tree
895 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
896 {
897 tree x, field = lookup_field (var, ctx);
898
899 /* If the receiver record type was remapped in the child function,
900 remap the field into the new record type. */
901 x = maybe_lookup_field (field, ctx);
902 if (x != NULL)
903 field = x;
904
905 x = build_simple_mem_ref (ctx->receiver_decl);
906 x = omp_build_component_ref (x, field);
907 if (by_ref)
908 x = build_simple_mem_ref (x);
909
910 return x;
911 }
912
913 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
914 of a parallel, this is a component reference; for workshare constructs
915 this is some variable. */
916
917 static tree
918 build_outer_var_ref (tree var, omp_context *ctx)
919 {
920 tree x;
921
922 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
923 x = var;
924 else if (is_variable_sized (var))
925 {
926 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
927 x = build_outer_var_ref (x, ctx);
928 x = build_simple_mem_ref (x);
929 }
930 else if (is_taskreg_ctx (ctx))
931 {
932 bool by_ref = use_pointer_for_field (var, NULL);
933 x = build_receiver_ref (var, by_ref, ctx);
934 }
935 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
936 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
937 {
938 /* #pragma omp simd isn't a worksharing construct, and can reference even
939 private vars in its linear etc. clauses. */
940 x = NULL_TREE;
941 if (ctx->outer && is_taskreg_ctx (ctx))
942 x = lookup_decl (var, ctx->outer);
943 else if (ctx->outer)
944 x = maybe_lookup_decl (var, ctx->outer);
945 if (x == NULL_TREE)
946 x = var;
947 }
948 else if (ctx->outer)
949 x = lookup_decl (var, ctx->outer);
950 else if (is_reference (var))
951 /* This can happen with orphaned constructs. If var is reference, it is
952 possible it is shared and as such valid. */
953 x = var;
954 else
955 gcc_unreachable ();
956
957 if (is_reference (var))
958 x = build_simple_mem_ref (x);
959
960 return x;
961 }
962
963 /* Build tree nodes to access the field for VAR on the sender side. */
964
965 static tree
966 build_sender_ref (tree var, omp_context *ctx)
967 {
968 tree field = lookup_sfield (var, ctx);
969 return omp_build_component_ref (ctx->sender_decl, field);
970 }
971
972 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
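
/* MASK is a bit mask (as can be seen from the code below): bit 0
   requests a field in CTX->RECORD_TYPE (looked up via CTX->FIELD_MAP),
   bit 1 a field in CTX->SRECORD_TYPE (via CTX->SFIELD_MAP), and the
   value 3 requests both, as used for ordinary shared variables.  */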
973
974 static void
975 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
976 {
977 tree field, type, sfield = NULL_TREE;
978
979 gcc_assert ((mask & 1) == 0
980 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
981 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
982 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
983
984 type = TREE_TYPE (var);
985 if (by_ref)
986 type = build_pointer_type (type);
987 else if ((mask & 3) == 1 && is_reference (var))
988 type = TREE_TYPE (type);
989
990 field = build_decl (DECL_SOURCE_LOCATION (var),
991 FIELD_DECL, DECL_NAME (var), type);
992
993 /* Remember what variable this field was created for. This does have a
994 side effect of making dwarf2out ignore this member, so for helpful
995 debugging we clear it later in delete_omp_context. */
996 DECL_ABSTRACT_ORIGIN (field) = var;
997 if (type == TREE_TYPE (var))
998 {
999 DECL_ALIGN (field) = DECL_ALIGN (var);
1000 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
1001 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
1002 }
1003 else
1004 DECL_ALIGN (field) = TYPE_ALIGN (type);
1005
1006 if ((mask & 3) == 3)
1007 {
1008 insert_field_into_struct (ctx->record_type, field);
1009 if (ctx->srecord_type)
1010 {
1011 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1012 FIELD_DECL, DECL_NAME (var), type);
1013 DECL_ABSTRACT_ORIGIN (sfield) = var;
1014 DECL_ALIGN (sfield) = DECL_ALIGN (field);
1015 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
1016 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
1017 insert_field_into_struct (ctx->srecord_type, sfield);
1018 }
1019 }
1020 else
1021 {
1022 if (ctx->srecord_type == NULL_TREE)
1023 {
1024 tree t;
1025
1026 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
1027 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1028 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1029 {
1030 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1031 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
1032 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
1033 insert_field_into_struct (ctx->srecord_type, sfield);
1034 splay_tree_insert (ctx->sfield_map,
1035 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
1036 (splay_tree_value) sfield);
1037 }
1038 }
1039 sfield = field;
1040 insert_field_into_struct ((mask & 1) ? ctx->record_type
1041 : ctx->srecord_type, field);
1042 }
1043
1044 if (mask & 1)
1045 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
1046 (splay_tree_value) field);
1047 if ((mask & 2) && ctx->sfield_map)
1048 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1049 (splay_tree_value) sfield);
1050 }
1051
1052 static tree
1053 install_var_local (tree var, omp_context *ctx)
1054 {
1055 tree new_var = omp_copy_decl_1 (var, ctx);
1056 insert_decl_map (&ctx->cb, var, new_var);
1057 return new_var;
1058 }
1059
1060 /* Adjust the replacement for DECL in CTX for the new context. This means
1061 copying the DECL_VALUE_EXPR, and fixing up the type. */
1062
1063 static void
1064 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1065 {
1066 tree new_decl, size;
1067
1068 new_decl = lookup_decl (decl, ctx);
1069
1070 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1071
1072 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1073 && DECL_HAS_VALUE_EXPR_P (decl))
1074 {
1075 tree ve = DECL_VALUE_EXPR (decl);
1076 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1077 SET_DECL_VALUE_EXPR (new_decl, ve);
1078 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1079 }
1080
1081 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1082 {
1083 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1084 if (size == error_mark_node)
1085 size = TYPE_SIZE (TREE_TYPE (new_decl));
1086 DECL_SIZE (new_decl) = size;
1087
1088 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1089 if (size == error_mark_node)
1090 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1091 DECL_SIZE_UNIT (new_decl) = size;
1092 }
1093 }
1094
1095 /* The callback for remap_decl. Search all containing contexts for a
1096 mapping of the variable; this avoids having to duplicate the splay
1097 tree ahead of time. We know a mapping doesn't already exist in the
1098 given context. Create new mappings to implement default semantics. */
1099
1100 static tree
1101 omp_copy_decl (tree var, copy_body_data *cb)
1102 {
1103 omp_context *ctx = (omp_context *) cb;
1104 tree new_var;
1105
1106 if (TREE_CODE (var) == LABEL_DECL)
1107 {
1108 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1109 DECL_CONTEXT (new_var) = current_function_decl;
1110 insert_decl_map (&ctx->cb, var, new_var);
1111 return new_var;
1112 }
1113
1114 while (!is_taskreg_ctx (ctx))
1115 {
1116 ctx = ctx->outer;
1117 if (ctx == NULL)
1118 return var;
1119 new_var = maybe_lookup_decl (var, ctx);
1120 if (new_var)
1121 return new_var;
1122 }
1123
1124 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1125 return var;
1126
1127 return error_mark_node;
1128 }
1129
1130
1131 /* Return the parallel region associated with STMT. */
1132
1133 /* Debugging dumps for parallel regions. */
1134 void dump_omp_region (FILE *, struct omp_region *, int);
1135 void debug_omp_region (struct omp_region *);
1136 void debug_all_omp_regions (void);
1137
1138 /* Dump the parallel region tree rooted at REGION. */
1139
1140 void
1141 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1142 {
1143 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1144 gimple_code_name[region->type]);
1145
1146 if (region->inner)
1147 dump_omp_region (file, region->inner, indent + 4);
1148
1149 if (region->cont)
1150 {
1151 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1152 region->cont->index);
1153 }
1154
1155 if (region->exit)
1156 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1157 region->exit->index);
1158 else
1159 fprintf (file, "%*s[no exit marker]\n", indent, "");
1160
1161 if (region->next)
1162 dump_omp_region (file, region->next, indent);
1163 }
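
/* Example output shape for a parallel region containing a loop (block
   numbers arbitrary):

       bb 2: gimple_omp_parallel
           bb 4: gimple_omp_for
           bb 6: GIMPLE_OMP_CONTINUE
           bb 7: GIMPLE_OMP_RETURN
       bb 3: GIMPLE_OMP_RETURN  */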
1164
1165 DEBUG_FUNCTION void
1166 debug_omp_region (struct omp_region *region)
1167 {
1168 dump_omp_region (stderr, region, 0);
1169 }
1170
1171 DEBUG_FUNCTION void
1172 debug_all_omp_regions (void)
1173 {
1174 dump_omp_region (stderr, root_omp_region, 0);
1175 }
1176
1177
1178 /* Create a new parallel region starting at STMT inside region PARENT. */
1179
1180 struct omp_region *
1181 new_omp_region (basic_block bb, enum gimple_code type,
1182 struct omp_region *parent)
1183 {
1184 struct omp_region *region = XCNEW (struct omp_region);
1185
1186 region->outer = parent;
1187 region->entry = bb;
1188 region->type = type;
1189
1190 if (parent)
1191 {
1192 /* This is a nested region. Add it to the list of inner
1193 regions in PARENT. */
1194 region->next = parent->inner;
1195 parent->inner = region;
1196 }
1197 else
1198 {
1199 /* This is a toplevel region. Add it to the list of toplevel
1200 regions in ROOT_OMP_REGION. */
1201 region->next = root_omp_region;
1202 root_omp_region = region;
1203 }
1204
1205 return region;
1206 }
1207
1208 /* Release the memory associated with the region tree rooted at REGION. */
1209
1210 static void
1211 free_omp_region_1 (struct omp_region *region)
1212 {
1213 struct omp_region *i, *n;
1214
1215 for (i = region->inner; i ; i = n)
1216 {
1217 n = i->next;
1218 free_omp_region_1 (i);
1219 }
1220
1221 free (region);
1222 }
1223
1224 /* Release the memory for the entire omp region tree. */
1225
1226 void
1227 free_omp_regions (void)
1228 {
1229 struct omp_region *r, *n;
1230 for (r = root_omp_region; r ; r = n)
1231 {
1232 n = r->next;
1233 free_omp_region_1 (r);
1234 }
1235 root_omp_region = NULL;
1236 }
1237
1238
1239 /* Create a new context, with OUTER_CTX being the surrounding context. */
1240
1241 static omp_context *
1242 new_omp_context (gimple stmt, omp_context *outer_ctx)
1243 {
1244 omp_context *ctx = XCNEW (omp_context);
1245
1246 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1247 (splay_tree_value) ctx);
1248 ctx->stmt = stmt;
1249
1250 if (outer_ctx)
1251 {
1252 ctx->outer = outer_ctx;
1253 ctx->cb = outer_ctx->cb;
1254 ctx->cb.block = NULL;
1255 ctx->depth = outer_ctx->depth + 1;
1256 }
1257 else
1258 {
1259 ctx->cb.src_fn = current_function_decl;
1260 ctx->cb.dst_fn = current_function_decl;
1261 ctx->cb.src_node = cgraph_get_node (current_function_decl);
1262 gcc_checking_assert (ctx->cb.src_node);
1263 ctx->cb.dst_node = ctx->cb.src_node;
1264 ctx->cb.src_cfun = cfun;
1265 ctx->cb.copy_decl = omp_copy_decl;
1266 ctx->cb.eh_lp_nr = 0;
1267 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1268 ctx->depth = 1;
1269 }
1270
1271 ctx->cb.decl_map = pointer_map_create ();
1272
1273 return ctx;
1274 }
1275
1276 static gimple_seq maybe_catch_exception (gimple_seq);
1277
1278 /* Finalize task copyfn. */
1279
1280 static void
1281 finalize_task_copyfn (gimple task_stmt)
1282 {
1283 struct function *child_cfun;
1284 tree child_fn;
1285 gimple_seq seq = NULL, new_seq;
1286 gimple bind;
1287
1288 child_fn = gimple_omp_task_copy_fn (task_stmt);
1289 if (child_fn == NULL_TREE)
1290 return;
1291
1292 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1293 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
1294
1295 push_cfun (child_cfun);
1296 bind = gimplify_body (child_fn, false);
1297 gimple_seq_add_stmt (&seq, bind);
1298 new_seq = maybe_catch_exception (seq);
1299 if (new_seq != seq)
1300 {
1301 bind = gimple_build_bind (NULL, new_seq, NULL);
1302 seq = NULL;
1303 gimple_seq_add_stmt (&seq, bind);
1304 }
1305 gimple_set_body (child_fn, seq);
1306 pop_cfun ();
1307
1308 /* Inform the callgraph about the new function. */
1309 cgraph_add_new_function (child_fn, false);
1310 }
1311
1312 /* Destroy an omp_context data structure. Called through the splay tree
1313 value delete callback. */
1314
1315 static void
1316 delete_omp_context (splay_tree_value value)
1317 {
1318 omp_context *ctx = (omp_context *) value;
1319
1320 pointer_map_destroy (ctx->cb.decl_map);
1321
1322 if (ctx->field_map)
1323 splay_tree_delete (ctx->field_map);
1324 if (ctx->sfield_map)
1325 splay_tree_delete (ctx->sfield_map);
1326
1327 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1328 it produces corrupt debug information. */
1329 if (ctx->record_type)
1330 {
1331 tree t;
1332 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1333 DECL_ABSTRACT_ORIGIN (t) = NULL;
1334 }
1335 if (ctx->srecord_type)
1336 {
1337 tree t;
1338 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1339 DECL_ABSTRACT_ORIGIN (t) = NULL;
1340 }
1341
1342 if (is_task_ctx (ctx))
1343 finalize_task_copyfn (ctx->stmt);
1344
1345 XDELETE (ctx);
1346 }
1347
1348 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1349 context. */
1350
1351 static void
1352 fixup_child_record_type (omp_context *ctx)
1353 {
1354 tree f, type = ctx->record_type;
1355
1356 /* ??? It isn't sufficient to just call remap_type here, because
1357 variably_modified_type_p doesn't work the way we expect for
1358 record types. Testing each field for whether it needs remapping
1359 and creating a new record by hand works, however. */
1360 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1361 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1362 break;
1363 if (f)
1364 {
1365 tree name, new_fields = NULL;
1366
1367 type = lang_hooks.types.make_type (RECORD_TYPE);
1368 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1369 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1370 TYPE_DECL, name, type);
1371 TYPE_NAME (type) = name;
1372
1373 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1374 {
1375 tree new_f = copy_node (f);
1376 DECL_CONTEXT (new_f) = type;
1377 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1378 DECL_CHAIN (new_f) = new_fields;
1379 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1380 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1381 &ctx->cb, NULL);
1382 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1383 &ctx->cb, NULL);
1384 new_fields = new_f;
1385
1386 /* Arrange to be able to look up the receiver field
1387 given the sender field. */
1388 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1389 (splay_tree_value) new_f);
1390 }
1391 TYPE_FIELDS (type) = nreverse (new_fields);
1392 layout_type (type);
1393 }
1394
1395 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1396 }
1397
1398 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1399 specified by CLAUSES. */
1400
1401 static void
1402 scan_sharing_clauses (tree clauses, omp_context *ctx)
1403 {
1404 tree c, decl;
1405 bool scan_array_reductions = false;
1406
1407 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1408 {
1409 bool by_ref;
1410
1411 switch (OMP_CLAUSE_CODE (c))
1412 {
1413 case OMP_CLAUSE_PRIVATE:
1414 decl = OMP_CLAUSE_DECL (c);
1415 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1416 goto do_private;
1417 else if (!is_variable_sized (decl))
1418 install_var_local (decl, ctx);
1419 break;
1420
1421 case OMP_CLAUSE_SHARED:
1422 gcc_assert (is_taskreg_ctx (ctx));
1423 decl = OMP_CLAUSE_DECL (c);
1424 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1425 || !is_variable_sized (decl));
1426 /* Global variables don't need to be copied,
1427 the receiver side will use them directly. */
1428 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1429 break;
1430 by_ref = use_pointer_for_field (decl, ctx);
1431 if (! TREE_READONLY (decl)
1432 || TREE_ADDRESSABLE (decl)
1433 || by_ref
1434 || is_reference (decl))
1435 {
1436 install_var_field (decl, by_ref, 3, ctx);
1437 install_var_local (decl, ctx);
1438 break;
1439 }
1440 /* We don't need to copy const scalar vars back. */
1441 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1442 goto do_private;
1443
1444 case OMP_CLAUSE_LASTPRIVATE:
1445 /* Let the corresponding firstprivate clause create
1446 the variable. */
1447 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1448 break;
1449 /* FALLTHRU */
1450
1451 case OMP_CLAUSE_FIRSTPRIVATE:
1452 case OMP_CLAUSE_REDUCTION:
1453 case OMP_CLAUSE_LINEAR:
1454 decl = OMP_CLAUSE_DECL (c);
1455 do_private:
1456 if (is_variable_sized (decl))
1457 {
1458 if (is_task_ctx (ctx))
1459 install_var_field (decl, false, 1, ctx);
1460 break;
1461 }
1462 else if (is_taskreg_ctx (ctx))
1463 {
1464 bool global
1465 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1466 by_ref = use_pointer_for_field (decl, NULL);
1467
1468 if (is_task_ctx (ctx)
1469 && (global || by_ref || is_reference (decl)))
1470 {
1471 install_var_field (decl, false, 1, ctx);
1472 if (!global)
1473 install_var_field (decl, by_ref, 2, ctx);
1474 }
1475 else if (!global)
1476 install_var_field (decl, by_ref, 3, ctx);
1477 }
1478 install_var_local (decl, ctx);
1479 break;
1480
1481 case OMP_CLAUSE_COPYPRIVATE:
1482 case OMP_CLAUSE_COPYIN:
1483 decl = OMP_CLAUSE_DECL (c);
1484 by_ref = use_pointer_for_field (decl, NULL);
1485 install_var_field (decl, by_ref, 3, ctx);
1486 break;
1487
1488 case OMP_CLAUSE_DEFAULT:
1489 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1490 break;
1491
1492 case OMP_CLAUSE_FINAL:
1493 case OMP_CLAUSE_IF:
1494 case OMP_CLAUSE_NUM_THREADS:
1495 case OMP_CLAUSE_SCHEDULE:
1496 if (ctx->outer)
1497 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1498 break;
1499
1500 case OMP_CLAUSE_NOWAIT:
1501 case OMP_CLAUSE_ORDERED:
1502 case OMP_CLAUSE_COLLAPSE:
1503 case OMP_CLAUSE_UNTIED:
1504 case OMP_CLAUSE_MERGEABLE:
1505 case OMP_CLAUSE_SAFELEN:
1506 break;
1507
1508 default:
1509 gcc_unreachable ();
1510 }
1511 }
1512
1513 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1514 {
1515 switch (OMP_CLAUSE_CODE (c))
1516 {
1517 case OMP_CLAUSE_LASTPRIVATE:
1518 /* Let the corresponding firstprivate clause create
1519 the variable. */
1520 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1521 scan_array_reductions = true;
1522 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1523 break;
1524 /* FALLTHRU */
1525
1526 case OMP_CLAUSE_PRIVATE:
1527 case OMP_CLAUSE_FIRSTPRIVATE:
1528 case OMP_CLAUSE_REDUCTION:
1529 case OMP_CLAUSE_LINEAR:
1530 decl = OMP_CLAUSE_DECL (c);
1531 if (is_variable_sized (decl))
1532 install_var_local (decl, ctx);
1533 fixup_remapped_decl (decl, ctx,
1534 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1535 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1536 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1537 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1538 scan_array_reductions = true;
1539 break;
1540
1541 case OMP_CLAUSE_SHARED:
1542 decl = OMP_CLAUSE_DECL (c);
1543 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1544 fixup_remapped_decl (decl, ctx, false);
1545 break;
1546
1547 case OMP_CLAUSE_COPYPRIVATE:
1548 case OMP_CLAUSE_COPYIN:
1549 case OMP_CLAUSE_DEFAULT:
1550 case OMP_CLAUSE_IF:
1551 case OMP_CLAUSE_NUM_THREADS:
1552 case OMP_CLAUSE_SCHEDULE:
1553 case OMP_CLAUSE_NOWAIT:
1554 case OMP_CLAUSE_ORDERED:
1555 case OMP_CLAUSE_COLLAPSE:
1556 case OMP_CLAUSE_UNTIED:
1557 case OMP_CLAUSE_FINAL:
1558 case OMP_CLAUSE_MERGEABLE:
1559 case OMP_CLAUSE_SAFELEN:
1560 break;
1561
1562 default:
1563 gcc_unreachable ();
1564 }
1565 }
1566
1567 if (scan_array_reductions)
1568 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1569 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1570 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1571 {
1572 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1573 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1574 }
1575 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1576 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1577 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1578 }
1579
1580 /* Create a new name for omp child function. Returns an identifier. */
1581
1582 static GTY(()) unsigned int tmp_ompfn_id_num;
1583
1584 static tree
1585 create_omp_child_function_name (bool task_copy)
1586 {
1587 return (clone_function_name (current_function_decl,
1588 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1589 }
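
/* For a function foo this typically yields names such as foo._omp_fn.0
   for an outlined body and foo._omp_cpyfn.1 for a task copy function;
   the numeric suffix is appended by clone_function_name.  */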
1590
1591 /* Build a decl for the omp child function. It'll not contain a body
1592 yet, just the bare decl. */
1593
1594 static void
1595 create_omp_child_function (omp_context *ctx, bool task_copy)
1596 {
1597 tree decl, type, name, t;
1598
1599 name = create_omp_child_function_name (task_copy);
1600 if (task_copy)
1601 type = build_function_type_list (void_type_node, ptr_type_node,
1602 ptr_type_node, NULL_TREE);
1603 else
1604 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1605
1606 decl = build_decl (gimple_location (ctx->stmt),
1607 FUNCTION_DECL, name, type);
1608
1609 if (!task_copy)
1610 ctx->cb.dst_fn = decl;
1611 else
1612 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1613
1614 TREE_STATIC (decl) = 1;
1615 TREE_USED (decl) = 1;
1616 DECL_ARTIFICIAL (decl) = 1;
1617 DECL_NAMELESS (decl) = 1;
1618 DECL_IGNORED_P (decl) = 0;
1619 TREE_PUBLIC (decl) = 0;
1620 DECL_UNINLINABLE (decl) = 1;
1621 DECL_EXTERNAL (decl) = 0;
1622 DECL_CONTEXT (decl) = NULL_TREE;
1623 DECL_INITIAL (decl) = make_node (BLOCK);
1624
1625 t = build_decl (DECL_SOURCE_LOCATION (decl),
1626 RESULT_DECL, NULL_TREE, void_type_node);
1627 DECL_ARTIFICIAL (t) = 1;
1628 DECL_IGNORED_P (t) = 1;
1629 DECL_CONTEXT (t) = decl;
1630 DECL_RESULT (decl) = t;
1631
1632 t = build_decl (DECL_SOURCE_LOCATION (decl),
1633 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1634 DECL_ARTIFICIAL (t) = 1;
1635 DECL_NAMELESS (t) = 1;
1636 DECL_ARG_TYPE (t) = ptr_type_node;
1637 DECL_CONTEXT (t) = current_function_decl;
1638 TREE_USED (t) = 1;
1639 DECL_ARGUMENTS (decl) = t;
1640 if (!task_copy)
1641 ctx->receiver_decl = t;
1642 else
1643 {
1644 t = build_decl (DECL_SOURCE_LOCATION (decl),
1645 PARM_DECL, get_identifier (".omp_data_o"),
1646 ptr_type_node);
1647 DECL_ARTIFICIAL (t) = 1;
1648 DECL_NAMELESS (t) = 1;
1649 DECL_ARG_TYPE (t) = ptr_type_node;
1650 DECL_CONTEXT (t) = current_function_decl;
1651 TREE_USED (t) = 1;
1652 TREE_ADDRESSABLE (t) = 1;
1653 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1654 DECL_ARGUMENTS (decl) = t;
1655 }
1656
1657 /* Allocate memory for the function structure. The call to
1658 allocate_struct_function clobbers CFUN, so we need to restore
1659 it afterward. */
1660 push_struct_function (decl);
1661 cfun->function_end_locus = gimple_location (ctx->stmt);
1662 pop_cfun ();
1663 }
1664
1665 /* Scan an OpenMP parallel directive. */
1666
1667 static void
1668 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1669 {
1670 omp_context *ctx;
1671 tree name;
1672 gimple stmt = gsi_stmt (*gsi);
1673
1674 /* Ignore parallel directives with empty bodies, unless there
1675 are copyin clauses. */
1676 if (optimize > 0
1677 && empty_body_p (gimple_omp_body (stmt))
1678 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1679 OMP_CLAUSE_COPYIN) == NULL)
1680 {
1681 gsi_replace (gsi, gimple_build_nop (), false);
1682 return;
1683 }
1684
1685 ctx = new_omp_context (stmt, outer_ctx);
1686 if (taskreg_nesting_level > 1)
1687 ctx->is_nested = true;
1688 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1689 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1690 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1691 name = create_tmp_var_name (".omp_data_s");
1692 name = build_decl (gimple_location (stmt),
1693 TYPE_DECL, name, ctx->record_type);
1694 DECL_ARTIFICIAL (name) = 1;
1695 DECL_NAMELESS (name) = 1;
1696 TYPE_NAME (ctx->record_type) = name;
1697 create_omp_child_function (ctx, false);
1698 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1699
1700 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1701 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1702
1703 if (TYPE_FIELDS (ctx->record_type) == NULL)
1704 ctx->record_type = ctx->receiver_decl = NULL;
1705 else
1706 {
1707 layout_type (ctx->record_type);
1708 fixup_child_record_type (ctx);
1709 }
1710 }
1711
1712 /* Scan an OpenMP task directive. */
1713
1714 static void
1715 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1716 {
1717 omp_context *ctx;
1718 tree name, t;
1719 gimple stmt = gsi_stmt (*gsi);
1720 location_t loc = gimple_location (stmt);
1721
1722 /* Ignore task directives with empty bodies. */
1723 if (optimize > 0
1724 && empty_body_p (gimple_omp_body (stmt)))
1725 {
1726 gsi_replace (gsi, gimple_build_nop (), false);
1727 return;
1728 }
1729
1730 ctx = new_omp_context (stmt, outer_ctx);
1731 if (taskreg_nesting_level > 1)
1732 ctx->is_nested = true;
1733 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1734 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1735 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1736 name = create_tmp_var_name (".omp_data_s");
1737 name = build_decl (gimple_location (stmt),
1738 TYPE_DECL, name, ctx->record_type);
1739 DECL_ARTIFICIAL (name) = 1;
1740 DECL_NAMELESS (name) = 1;
1741 TYPE_NAME (ctx->record_type) = name;
1742 create_omp_child_function (ctx, false);
1743 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1744
1745 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1746
1747 if (ctx->srecord_type)
1748 {
1749 name = create_tmp_var_name (".omp_data_a");
1750 name = build_decl (gimple_location (stmt),
1751 TYPE_DECL, name, ctx->srecord_type);
1752 DECL_ARTIFICIAL (name) = 1;
1753 DECL_NAMELESS (name) = 1;
1754 TYPE_NAME (ctx->srecord_type) = name;
1755 create_omp_child_function (ctx, true);
1756 }
1757
1758 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1759
1760 if (TYPE_FIELDS (ctx->record_type) == NULL)
1761 {
1762 ctx->record_type = ctx->receiver_decl = NULL;
1763 t = build_int_cst (long_integer_type_node, 0);
1764 gimple_omp_task_set_arg_size (stmt, t);
1765 t = build_int_cst (long_integer_type_node, 1);
1766 gimple_omp_task_set_arg_align (stmt, t);
1767 }
1768 else
1769 {
1770 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1771 /* Move VLA fields to the end. */
1772 p = &TYPE_FIELDS (ctx->record_type);
1773 while (*p)
1774 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1775 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1776 {
1777 *q = *p;
1778 *p = TREE_CHAIN (*p);
1779 TREE_CHAIN (*q) = NULL_TREE;
1780 q = &TREE_CHAIN (*q);
1781 }
1782 else
1783 p = &DECL_CHAIN (*p);
1784 *p = vla_fields;
1785 layout_type (ctx->record_type);
1786 fixup_child_record_type (ctx);
1787 if (ctx->srecord_type)
1788 layout_type (ctx->srecord_type);
1789 t = fold_convert_loc (loc, long_integer_type_node,
1790 TYPE_SIZE_UNIT (ctx->record_type));
1791 gimple_omp_task_set_arg_size (stmt, t);
1792 t = build_int_cst (long_integer_type_node,
1793 TYPE_ALIGN_UNIT (ctx->record_type));
1794 gimple_omp_task_set_arg_align (stmt, t);
1795 }
1796 }
1797
1798
1799 /* Scan an OpenMP loop directive. */
1800
1801 static void
1802 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1803 {
1804 omp_context *ctx;
1805 size_t i;
1806
1807 ctx = new_omp_context (stmt, outer_ctx);
1808
1809 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1810
1811 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
1812 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1813 {
1814 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1815 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1816 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1817 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1818 }
1819 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1820 }
1821
1822 /* Scan an OpenMP sections directive. */
1823
1824 static void
1825 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1826 {
1827 omp_context *ctx;
1828
1829 ctx = new_omp_context (stmt, outer_ctx);
1830 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1831 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1832 }
1833
1834 /* Scan an OpenMP single directive. */
1835
1836 static void
1837 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1838 {
1839 omp_context *ctx;
1840 tree name;
1841
1842 ctx = new_omp_context (stmt, outer_ctx);
1843 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1844 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1845 name = create_tmp_var_name (".omp_copy_s");
1846 name = build_decl (gimple_location (stmt),
1847 TYPE_DECL, name, ctx->record_type);
1848 TYPE_NAME (ctx->record_type) = name;
1849
1850 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1851 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1852
1853 if (TYPE_FIELDS (ctx->record_type) == NULL)
1854 ctx->record_type = NULL;
1855 else
1856 layout_type (ctx->record_type);
1857 }
1858
1859
1860 /* Check OpenMP nesting restrictions. */
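/* As an illustration only (hypothetical user code, not taken from the
   testsuite), the checks below reject closely nested work-sharing such as

       #pragma omp for
       for (i = 0; i < n; i++)
         {
           #pragma omp single    <- rejected: work-sharing closely nested
             f (i);                 inside another work-sharing region
         }

   whereas an intervening "#pragma omp parallel" makes the nesting legal,
   which is why the GIMPLE_OMP_PARALLEL cases below return true.  */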
1861 static bool
1862 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1863 {
1864 if (ctx != NULL)
1865 {
1866 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
1867 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
1868 {
1869 error_at (gimple_location (stmt),
1870 "OpenMP constructs may not be nested inside simd region");
1871 return false;
1872 }
1873 }
1874 switch (gimple_code (stmt))
1875 {
1876 case GIMPLE_OMP_FOR:
1877 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_SIMD)
1878 return true;
1879 /* FALLTHRU */
1880 case GIMPLE_OMP_SECTIONS:
1881 case GIMPLE_OMP_SINGLE:
1882 case GIMPLE_CALL:
1883 for (; ctx != NULL; ctx = ctx->outer)
1884 switch (gimple_code (ctx->stmt))
1885 {
1886 case GIMPLE_OMP_FOR:
1887 case GIMPLE_OMP_SECTIONS:
1888 case GIMPLE_OMP_SINGLE:
1889 case GIMPLE_OMP_ORDERED:
1890 case GIMPLE_OMP_MASTER:
1891 case GIMPLE_OMP_TASK:
1892 if (is_gimple_call (stmt))
1893 {
1894 error_at (gimple_location (stmt),
1895 "barrier region may not be closely nested inside "
1896 "of work-sharing, critical, ordered, master or "
1897 "explicit task region");
1898 return false;
1899 }
1900 error_at (gimple_location (stmt),
1901 "work-sharing region may not be closely nested inside "
1902 "of work-sharing, critical, ordered, master or explicit "
1903 "task region");
1904 return false;
1905 case GIMPLE_OMP_PARALLEL:
1906 return true;
1907 default:
1908 break;
1909 }
1910 break;
1911 case GIMPLE_OMP_MASTER:
1912 for (; ctx != NULL; ctx = ctx->outer)
1913 switch (gimple_code (ctx->stmt))
1914 {
1915 case GIMPLE_OMP_FOR:
1916 case GIMPLE_OMP_SECTIONS:
1917 case GIMPLE_OMP_SINGLE:
1918 case GIMPLE_OMP_TASK:
1919 error_at (gimple_location (stmt),
1920 "master region may not be closely nested inside "
1921 "of work-sharing or explicit task region");
1922 return false;
1923 case GIMPLE_OMP_PARALLEL:
1924 return true;
1925 default:
1926 break;
1927 }
1928 break;
1929 case GIMPLE_OMP_ORDERED:
1930 for (; ctx != NULL; ctx = ctx->outer)
1931 switch (gimple_code (ctx->stmt))
1932 {
1933 case GIMPLE_OMP_CRITICAL:
1934 case GIMPLE_OMP_TASK:
1935 error_at (gimple_location (stmt),
1936 "ordered region may not be closely nested inside "
1937 "of critical or explicit task region");
1938 return false;
1939 case GIMPLE_OMP_FOR:
1940 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1941 OMP_CLAUSE_ORDERED) == NULL)
1942 {
1943 error_at (gimple_location (stmt),
1944 "ordered region must be closely nested inside "
1945 "a loop region with an ordered clause");
1946 return false;
1947 }
1948 return true;
1949 case GIMPLE_OMP_PARALLEL:
1950 return true;
1951 default:
1952 break;
1953 }
1954 break;
1955 case GIMPLE_OMP_CRITICAL:
1956 for (; ctx != NULL; ctx = ctx->outer)
1957 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1958 && (gimple_omp_critical_name (stmt)
1959 == gimple_omp_critical_name (ctx->stmt)))
1960 {
1961 error_at (gimple_location (stmt),
1962 "critical region may not be nested inside a critical "
1963 "region with the same name");
1964 return false;
1965 }
1966 break;
1967 default:
1968 break;
1969 }
1970 return true;
1971 }
1972
1973
1974 /* Helper function for scan_omp.
1975 
1976 Callback for walk_tree and for the operand walk of walk_gimple_stmt,
1977 used to scan for OpenMP directives in TP. */
1978
1979 static tree
1980 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1981 {
1982 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1983 omp_context *ctx = (omp_context *) wi->info;
1984 tree t = *tp;
1985
1986 switch (TREE_CODE (t))
1987 {
1988 case VAR_DECL:
1989 case PARM_DECL:
1990 case LABEL_DECL:
1991 case RESULT_DECL:
1992 if (ctx)
1993 *tp = remap_decl (t, &ctx->cb);
1994 break;
1995
1996 default:
1997 if (ctx && TYPE_P (t))
1998 *tp = remap_type (t, &ctx->cb);
1999 else if (!DECL_P (t))
2000 {
2001 *walk_subtrees = 1;
2002 if (ctx)
2003 {
2004 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
2005 if (tem != TREE_TYPE (t))
2006 {
2007 if (TREE_CODE (t) == INTEGER_CST)
2008 *tp = build_int_cst_wide (tem,
2009 TREE_INT_CST_LOW (t),
2010 TREE_INT_CST_HIGH (t));
2011 else
2012 TREE_TYPE (t) = tem;
2013 }
2014 }
2015 }
2016 break;
2017 }
2018
2019 return NULL_TREE;
2020 }
2021
2022
2023 /* Helper function for scan_omp.
2024
2025 Callback for walk_gimple_stmt used to scan for OpenMP directives in
2026 the current statement in GSI. */
2027
2028 static tree
2029 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
2030 struct walk_stmt_info *wi)
2031 {
2032 gimple stmt = gsi_stmt (*gsi);
2033 omp_context *ctx = (omp_context *) wi->info;
2034
2035 if (gimple_has_location (stmt))
2036 input_location = gimple_location (stmt);
2037
2038 /* Check the OpenMP nesting restrictions. */
2039 if (ctx != NULL)
2040 {
2041 bool remove = false;
2042 if (is_gimple_omp (stmt))
2043 remove = !check_omp_nesting_restrictions (stmt, ctx);
2044 else if (is_gimple_call (stmt))
2045 {
2046 tree fndecl = gimple_call_fndecl (stmt);
2047 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2048 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
2049 remove = !check_omp_nesting_restrictions (stmt, ctx);
2050 }
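/* The invalid construct has already been diagnosed by
   check_omp_nesting_restrictions; replace it with a no-op so the rest of
   the function can still be scanned and lowered.  */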
2051 if (remove)
2052 {
2053 stmt = gimple_build_nop ();
2054 gsi_replace (gsi, stmt, false);
2055 }
2056 }
2057
2058 *handled_ops_p = true;
2059
2060 switch (gimple_code (stmt))
2061 {
2062 case GIMPLE_OMP_PARALLEL:
2063 taskreg_nesting_level++;
2064 scan_omp_parallel (gsi, ctx);
2065 taskreg_nesting_level--;
2066 break;
2067
2068 case GIMPLE_OMP_TASK:
2069 taskreg_nesting_level++;
2070 scan_omp_task (gsi, ctx);
2071 taskreg_nesting_level--;
2072 break;
2073
2074 case GIMPLE_OMP_FOR:
2075 scan_omp_for (stmt, ctx);
2076 break;
2077
2078 case GIMPLE_OMP_SECTIONS:
2079 scan_omp_sections (stmt, ctx);
2080 break;
2081
2082 case GIMPLE_OMP_SINGLE:
2083 scan_omp_single (stmt, ctx);
2084 break;
2085
2086 case GIMPLE_OMP_SECTION:
2087 case GIMPLE_OMP_MASTER:
2088 case GIMPLE_OMP_ORDERED:
2089 case GIMPLE_OMP_CRITICAL:
2090 ctx = new_omp_context (stmt, ctx);
2091 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2092 break;
2093
2094 case GIMPLE_BIND:
2095 {
2096 tree var;
2097
2098 *handled_ops_p = false;
2099 if (ctx)
2100 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2101 insert_decl_map (&ctx->cb, var, var);
2102 }
2103 break;
2104 default:
2105 *handled_ops_p = false;
2106 break;
2107 }
2108
2109 return NULL_TREE;
2110 }
2111
2112
2113 /* Scan all the statements starting at the current statement. CTX
2114 contains context information about the OpenMP directives and
2115 clauses found during the scan. */
2116
2117 static void
2118 scan_omp (gimple_seq *body_p, omp_context *ctx)
2119 {
2120 location_t saved_location;
2121 struct walk_stmt_info wi;
2122
2123 memset (&wi, 0, sizeof (wi));
2124 wi.info = ctx;
2125 wi.want_locations = true;
2126
2127 saved_location = input_location;
2128 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2129 input_location = saved_location;
2130 }
2131 \f
2132 /* Re-gimplification and code generation routines. */
2133
2134 /* Build a call to GOMP_barrier. */
2135
2136 static tree
2137 build_omp_barrier (void)
2138 {
2139 return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
2140 }
2141
2142 /* If a context was created for STMT when it was scanned, return it. */
2143
2144 static omp_context *
2145 maybe_lookup_ctx (gimple stmt)
2146 {
2147 splay_tree_node n;
2148 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2149 return n ? (omp_context *) n->value : NULL;
2150 }
2151
2152
2153 /* Find the mapping for DECL in CTX or the immediately enclosing
2154 context that has a mapping for DECL.
2155
2156 If CTX is a nested parallel directive, we may have to use the decl
2157 mappings created in CTX's parent context. Suppose that we have the
2158 following parallel nesting (variable UIDs shown for clarity):
2159
2160 iD.1562 = 0;
2161 #omp parallel shared(iD.1562) -> outer parallel
2162 iD.1562 = iD.1562 + 1;
2163
2164 #omp parallel shared (iD.1562) -> inner parallel
2165 iD.1562 = iD.1562 - 1;
2166
2167 Each parallel structure will create a distinct .omp_data_s structure
2168 for copying iD.1562 in/out of the directive:
2169
2170 outer parallel .omp_data_s.1.i -> iD.1562
2171 inner parallel .omp_data_s.2.i -> iD.1562
2172
2173 A shared variable mapping will produce a copy-out operation before
2174 the parallel directive and a copy-in operation after it. So, in
2175 this case we would have:
2176
2177 iD.1562 = 0;
2178 .omp_data_o.1.i = iD.1562;
2179 #omp parallel shared(iD.1562) -> outer parallel
2180 .omp_data_i.1 = &.omp_data_o.1
2181 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2182
2183 .omp_data_o.2.i = iD.1562; -> **
2184 #omp parallel shared(iD.1562) -> inner parallel
2185 .omp_data_i.2 = &.omp_data_o.2
2186 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2187
2188
2189 ** This is a problem. The symbol iD.1562 cannot be referenced
2190 inside the body of the outer parallel region. But since we are
2191 emitting this copy operation while expanding the inner parallel
2192 directive, we need to access the CTX structure of the outer
2193 parallel directive to get the correct mapping:
2194
2195 .omp_data_o.2.i = .omp_data_i.1->i
2196
2197 Since there may be other workshare or parallel directives enclosing
2198 the parallel directive, it may be necessary to walk up the context
2199 parent chain. This is not a problem in general because nested
2200 parallelism happens only rarely. */
2201
2202 static tree
2203 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2204 {
2205 tree t;
2206 omp_context *up;
2207
2208 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2209 t = maybe_lookup_decl (decl, up);
2210
2211 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2212
2213 return t ? t : decl;
2214 }
2215
2216
2217 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2218 in outer contexts. */
2219
2220 static tree
2221 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2222 {
2223 tree t = NULL;
2224 omp_context *up;
2225
2226 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2227 t = maybe_lookup_decl (decl, up);
2228
2229 return t ? t : decl;
2230 }
2231
2232
2233 /* Construct the initialization value for reduction CLAUSE. */
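/* For reference, the identity values produced by the switch below are
   (sketch of the mapping, matching the OpenMP reduction identities):

     +, -, |, ^, logical or/xor, !=   ->  0
     *, logical and, ==               ->  1
     &                                ->  ~0  (all bits set)
     max                              ->  minimum value of the type
                                          (-inf for floats honoring infinities)
     min                              ->  maximum value of the type (+inf)  */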
2234
2235 tree
2236 omp_reduction_init (tree clause, tree type)
2237 {
2238 location_t loc = OMP_CLAUSE_LOCATION (clause);
2239 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2240 {
2241 case PLUS_EXPR:
2242 case MINUS_EXPR:
2243 case BIT_IOR_EXPR:
2244 case BIT_XOR_EXPR:
2245 case TRUTH_OR_EXPR:
2246 case TRUTH_ORIF_EXPR:
2247 case TRUTH_XOR_EXPR:
2248 case NE_EXPR:
2249 return build_zero_cst (type);
2250
2251 case MULT_EXPR:
2252 case TRUTH_AND_EXPR:
2253 case TRUTH_ANDIF_EXPR:
2254 case EQ_EXPR:
2255 return fold_convert_loc (loc, type, integer_one_node);
2256
2257 case BIT_AND_EXPR:
2258 return fold_convert_loc (loc, type, integer_minus_one_node);
2259
2260 case MAX_EXPR:
2261 if (SCALAR_FLOAT_TYPE_P (type))
2262 {
2263 REAL_VALUE_TYPE max, min;
2264 if (HONOR_INFINITIES (TYPE_MODE (type)))
2265 {
2266 real_inf (&max);
2267 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2268 }
2269 else
2270 real_maxval (&min, 1, TYPE_MODE (type));
2271 return build_real (type, min);
2272 }
2273 else
2274 {
2275 gcc_assert (INTEGRAL_TYPE_P (type));
2276 return TYPE_MIN_VALUE (type);
2277 }
2278
2279 case MIN_EXPR:
2280 if (SCALAR_FLOAT_TYPE_P (type))
2281 {
2282 REAL_VALUE_TYPE max;
2283 if (HONOR_INFINITIES (TYPE_MODE (type)))
2284 real_inf (&max);
2285 else
2286 real_maxval (&max, 0, TYPE_MODE (type));
2287 return build_real (type, max);
2288 }
2289 else
2290 {
2291 gcc_assert (INTEGRAL_TYPE_P (type));
2292 return TYPE_MAX_VALUE (type);
2293 }
2294
2295 default:
2296 gcc_unreachable ();
2297 }
2298 }
2299
2300 /* Return maximum possible vectorization factor for the target. */
2301
2302 static int
2303 omp_max_vf (void)
2304 {
2305 if (!optimize
2306 || optimize_debug
2307 || (!flag_tree_vectorize
2308 && global_options_set.x_flag_tree_vectorize))
2309 return 1;
2310
2311 int vs = targetm.vectorize.autovectorize_vector_sizes ();
2312 if (vs)
2313 {
2314 vs = 1 << floor_log2 (vs);
2315 return vs;
2316 }
2317 enum machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
2318 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
2319 return GET_MODE_NUNITS (vqimode);
2320 return 1;
2321 }
2322
2323 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
2324 privatization. */
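/* Roughly, for each privatized variable "x" in a simd loop this creates an
   "omp simd array" (the name x_array below is illustrative only)

       T x_array[max_vf];

   and rewrites uses of the private copy so that the loop body accesses
   x_array[lane] (via DECL_VALUE_EXPR), while the per-lane init/fini
   sequences built by lower_rec_input_clauses use x_array[idx].  */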
2325
2326 static bool
2327 lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
2328 tree &idx, tree &lane, tree &ivar, tree &lvar)
2329 {
2330 if (max_vf == 0)
2331 {
2332 max_vf = omp_max_vf ();
2333 if (max_vf > 1)
2334 {
2335 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2336 OMP_CLAUSE_SAFELEN);
2337 if (c
2338 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c), max_vf) == -1)
2339 max_vf = tree_low_cst (OMP_CLAUSE_SAFELEN_EXPR (c), 0);
2340 }
2341 if (max_vf > 1)
2342 {
2343 idx = create_tmp_var (unsigned_type_node, NULL);
2344 lane = create_tmp_var (unsigned_type_node, NULL);
2345 }
2346 }
2347 if (max_vf == 1)
2348 return false;
2349
2350 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
2351 tree avar = create_tmp_var_raw (atype, NULL);
2352 if (TREE_ADDRESSABLE (new_var))
2353 TREE_ADDRESSABLE (avar) = 1;
2354 DECL_ATTRIBUTES (avar)
2355 = tree_cons (get_identifier ("omp simd array"), NULL,
2356 DECL_ATTRIBUTES (avar));
2357 gimple_add_tmp_var (avar);
2358 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
2359 NULL_TREE, NULL_TREE);
2360 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
2361 NULL_TREE, NULL_TREE);
2362 SET_DECL_VALUE_EXPR (new_var, lvar);
2363 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2364 return true;
2365 }
2366
2367 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2368 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2369 private variables. Initialization statements go in ILIST, while calls
2370 to destructors go in DLIST. */
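/* As a rough sketch (details vary per clause and language), for

       #pragma omp parallel firstprivate (a) copyin (t)

   this emits into ILIST something along the lines of

       a.1 = .omp_data_i->a;                      <- firstprivate copy
       if (__builtin_omp_get_thread_num () != 0)
         t = .omp_data_i->t;                      <- copyin; the master
                                                     thread skips the self-copy

   while C++ destructor calls for the private copies go into DLIST.  */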
2371
2372 static void
2373 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2374 omp_context *ctx)
2375 {
2376 tree c, dtor, copyin_seq, x, ptr;
2377 bool copyin_by_ref = false;
2378 bool lastprivate_firstprivate = false;
2379 int pass;
2380 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2381 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD);
2382 int max_vf = 0;
2383 tree lane = NULL_TREE, idx = NULL_TREE;
2384 tree ivar = NULL_TREE, lvar = NULL_TREE;
2385 gimple_seq llist[2] = { NULL, NULL };
2386
2387 copyin_seq = NULL;
2388
2389 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
2390 with data sharing clauses referencing variable sized vars. That
2391 is unnecessarily hard to support and very unlikely to result in
2392 vectorized code anyway. */
2393 if (is_simd)
2394 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2395 switch (OMP_CLAUSE_CODE (c))
2396 {
2397 case OMP_CLAUSE_REDUCTION:
2398 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2399 max_vf = 1;
2400 /* FALLTHRU */
2401 case OMP_CLAUSE_PRIVATE:
2402 case OMP_CLAUSE_FIRSTPRIVATE:
2403 case OMP_CLAUSE_LASTPRIVATE:
2404 case OMP_CLAUSE_LINEAR:
2405 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
2406 max_vf = 1;
2407 break;
2408 default:
2409 continue;
2410 }
2411
2412 /* Do all the fixed sized types in the first pass, and the variable sized
2413 types in the second pass. This makes sure that the scalar arguments to
2414 the variable sized types are processed before we use them in the
2415 variable sized operations. */
2416 for (pass = 0; pass < 2; ++pass)
2417 {
2418 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2419 {
2420 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2421 tree var, new_var;
2422 bool by_ref;
2423 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2424
2425 switch (c_kind)
2426 {
2427 case OMP_CLAUSE_PRIVATE:
2428 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2429 continue;
2430 break;
2431 case OMP_CLAUSE_SHARED:
2432 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2433 {
2434 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2435 continue;
2436 }
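/* FALLTHRU */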
2437 case OMP_CLAUSE_FIRSTPRIVATE:
2438 case OMP_CLAUSE_COPYIN:
2439 case OMP_CLAUSE_REDUCTION:
2440 break;
2441 case OMP_CLAUSE_LINEAR:
2442 break;
2443 case OMP_CLAUSE_LASTPRIVATE:
2444 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2445 {
2446 lastprivate_firstprivate = true;
2447 if (pass != 0)
2448 continue;
2449 }
2450 break;
2451 default:
2452 continue;
2453 }
2454
2455 new_var = var = OMP_CLAUSE_DECL (c);
2456 if (c_kind != OMP_CLAUSE_COPYIN)
2457 new_var = lookup_decl (var, ctx);
2458
2459 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2460 {
2461 if (pass != 0)
2462 continue;
2463 }
2464 else if (is_variable_sized (var))
2465 {
2466 /* For variable sized types, we need to allocate the
2467 actual storage here. Call alloca and store the
2468 result in the pointer decl that we created elsewhere. */
2469 if (pass == 0)
2470 continue;
2471
2472 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2473 {
2474 gimple stmt;
2475 tree tmp, atmp;
2476
2477 ptr = DECL_VALUE_EXPR (new_var);
2478 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2479 ptr = TREE_OPERAND (ptr, 0);
2480 gcc_assert (DECL_P (ptr));
2481 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2482
2483 /* void *tmp = __builtin_alloca */
2484 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2485 stmt = gimple_build_call (atmp, 1, x);
2486 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2487 gimple_add_tmp_var (tmp);
2488 gimple_call_set_lhs (stmt, tmp);
2489
2490 gimple_seq_add_stmt (ilist, stmt);
2491
2492 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2493 gimplify_assign (ptr, x, ilist);
2494 }
2495 }
2496 else if (is_reference (var))
2497 {
2498 /* For references that are being privatized for Fortran,
2499 allocate new backing storage for the new pointer
2500 variable. This allows us to avoid changing all the
2501 code that expects a pointer to something that expects
2502 a direct variable. Note that this doesn't apply to
2503 C++, since reference types are disallowed in data
2504 sharing clauses there, except for NRV optimized
2505 return values. */
2506 if (pass == 0)
2507 continue;
2508
2509 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2510 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2511 {
2512 x = build_receiver_ref (var, false, ctx);
2513 x = build_fold_addr_expr_loc (clause_loc, x);
2514 }
2515 else if (TREE_CONSTANT (x))
2516 {
2517 const char *name = NULL;
2518 if (DECL_NAME (var))
2519 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2520
2521 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2522 name);
2523 gimple_add_tmp_var (x);
2524 TREE_ADDRESSABLE (x) = 1;
2525 x = build_fold_addr_expr_loc (clause_loc, x);
2526 }
2527 else
2528 {
2529 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2530 x = build_call_expr_loc (clause_loc, atmp, 1, x);
2531 }
2532
2533 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2534 gimplify_assign (new_var, x, ilist);
2535
2536 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2537 }
2538 else if (c_kind == OMP_CLAUSE_REDUCTION
2539 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2540 {
2541 if (pass == 0)
2542 continue;
2543 }
2544 else if (pass != 0)
2545 continue;
2546
2547 switch (OMP_CLAUSE_CODE (c))
2548 {
2549 case OMP_CLAUSE_SHARED:
2550 /* Shared global vars are just accessed directly. */
2551 if (is_global_var (new_var))
2552 break;
2553 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2554 needs to be delayed until after fixup_child_record_type so
2555 that we get the correct type during the dereference. */
2556 by_ref = use_pointer_for_field (var, ctx);
2557 x = build_receiver_ref (var, by_ref, ctx);
2558 SET_DECL_VALUE_EXPR (new_var, x);
2559 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2560
2561 /* ??? If VAR is not passed by reference, and the variable
2562 hasn't been initialized yet, then we'll get a warning for
2563 the store into the omp_data_s structure. Ideally, we'd be
2564 able to notice this and not store anything at all, but
2565 we're generating code too early. Suppress the warning. */
2566 if (!by_ref)
2567 TREE_NO_WARNING (var) = 1;
2568 break;
2569
2570 case OMP_CLAUSE_LASTPRIVATE:
2571 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2572 break;
2573 /* FALLTHRU */
2574
2575 case OMP_CLAUSE_PRIVATE:
2576 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2577 x = build_outer_var_ref (var, ctx);
2578 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2579 {
2580 if (is_task_ctx (ctx))
2581 x = build_receiver_ref (var, false, ctx);
2582 else
2583 x = build_outer_var_ref (var, ctx);
2584 }
2585 else
2586 x = NULL;
2587 do_private:
2588 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2589 if (is_simd)
2590 {
2591 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
2592 if ((TREE_ADDRESSABLE (new_var) || x || y
2593 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2594 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
2595 idx, lane, ivar, lvar))
2596 {
2597 if (x)
2598 x = lang_hooks.decls.omp_clause_default_ctor
2599 (c, unshare_expr (ivar), x);
2600 if (x)
2601 gimplify_and_add (x, &llist[0]);
2602 if (y)
2603 {
2604 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
2605 if (y)
2606 {
2607 gimple_seq tseq = NULL;
2608
2609 dtor = y;
2610 gimplify_stmt (&dtor, &tseq);
2611 gimple_seq_add_seq (&llist[1], tseq);
2612 }
2613 }
2614 break;
2615 }
2616 }
2617 if (x)
2618 gimplify_and_add (x, ilist);
2619 /* FALLTHRU */
2620
2621 do_dtor:
2622 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2623 if (x)
2624 {
2625 gimple_seq tseq = NULL;
2626
2627 dtor = x;
2628 gimplify_stmt (&dtor, &tseq);
2629 gimple_seq_add_seq (dlist, tseq);
2630 }
2631 break;
2632
2633 case OMP_CLAUSE_LINEAR:
2634 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
2635 goto do_firstprivate;
2636 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
2637 x = NULL;
2638 else
2639 x = build_outer_var_ref (var, ctx);
2640 goto do_private;
2641
2642 case OMP_CLAUSE_FIRSTPRIVATE:
2643 if (is_task_ctx (ctx))
2644 {
2645 if (is_reference (var) || is_variable_sized (var))
2646 goto do_dtor;
2647 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2648 ctx))
2649 || use_pointer_for_field (var, NULL))
2650 {
2651 x = build_receiver_ref (var, false, ctx);
2652 SET_DECL_VALUE_EXPR (new_var, x);
2653 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2654 goto do_dtor;
2655 }
2656 }
2657 do_firstprivate:
2658 x = build_outer_var_ref (var, ctx);
2659 if (is_simd)
2660 {
2661 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
2662 || TREE_ADDRESSABLE (new_var))
2663 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
2664 idx, lane, ivar, lvar))
2665 {
2666 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
2667 {
2668 tree iv = create_tmp_var (TREE_TYPE (new_var), NULL);
2669 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
2670 gimplify_and_add (x, ilist);
2671 gimple_stmt_iterator gsi
2672 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
2673 gimple g
2674 = gimple_build_assign (unshare_expr (lvar), iv);
2675 gsi_insert_before_without_update (&gsi, g,
2676 GSI_SAME_STMT);
2677 tree stept = POINTER_TYPE_P (TREE_TYPE (x))
2678 ? sizetype : TREE_TYPE (x);
2679 tree t = fold_convert (stept,
2680 OMP_CLAUSE_LINEAR_STEP (c));
2681 enum tree_code code = PLUS_EXPR;
2682 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
2683 code = POINTER_PLUS_EXPR;
2684 g = gimple_build_assign_with_ops (code, iv, iv, t);
2685 gsi_insert_before_without_update (&gsi, g,
2686 GSI_SAME_STMT);
2687 break;
2688 }
2689 x = lang_hooks.decls.omp_clause_copy_ctor
2690 (c, unshare_expr (ivar), x);
2691 gimplify_and_add (x, &llist[0]);
2692 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
2693 if (x)
2694 {
2695 gimple_seq tseq = NULL;
2696
2697 dtor = x;
2698 gimplify_stmt (&dtor, &tseq);
2699 gimple_seq_add_seq (&llist[1], tseq);
2700 }
2701 break;
2702 }
2703 }
2704 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2705 gimplify_and_add (x, ilist);
2706 goto do_dtor;
2707
2708 case OMP_CLAUSE_COPYIN:
2709 by_ref = use_pointer_for_field (var, NULL);
2710 x = build_receiver_ref (var, by_ref, ctx);
2711 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2712 append_to_statement_list (x, &copyin_seq);
2713 copyin_by_ref |= by_ref;
2714 break;
2715
2716 case OMP_CLAUSE_REDUCTION:
2717 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2718 {
2719 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2720 x = build_outer_var_ref (var, ctx);
2721
2722 /* FIXME: Not handled yet. */
2723 gcc_assert (!is_simd);
2724 if (is_reference (var))
2725 x = build_fold_addr_expr_loc (clause_loc, x);
2726 SET_DECL_VALUE_EXPR (placeholder, x);
2727 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2728 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2729 gimple_seq_add_seq (ilist,
2730 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2731 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2732 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2733 }
2734 else
2735 {
2736 x = omp_reduction_init (c, TREE_TYPE (new_var));
2737 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2738 if (is_simd
2739 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
2740 idx, lane, ivar, lvar))
2741 {
2742 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
2743 tree ref = build_outer_var_ref (var, ctx);
2744
2745 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
2746
2747 /* reduction(-:var) sums up the partial results, so it
2748 acts identically to reduction(+:var). */
2749 if (code == MINUS_EXPR)
2750 code = PLUS_EXPR;
2751
2752 x = build2 (code, TREE_TYPE (ref), ref, ivar);
2753 ref = build_outer_var_ref (var, ctx);
2754 gimplify_assign (ref, x, &llist[1]);
2755 }
2756 else
2757 {
2758 gimplify_assign (new_var, x, ilist);
2759 if (is_simd)
2760 gimplify_assign (build_outer_var_ref (var, ctx),
2761 new_var, dlist);
2762 }
2763 }
2764 break;
2765
2766 default:
2767 gcc_unreachable ();
2768 }
2769 }
2770 }
2771
2772 if (lane)
2773 {
2774 tree uid = create_tmp_var (ptr_type_node, "simduid");
2775 gimple g
2776 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
2777 gimple_call_set_lhs (g, lane);
2778 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
2779 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
2780 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
2781 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
2782 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
2783 gimple_omp_for_set_clauses (ctx->stmt, c);
2784 g = gimple_build_assign_with_ops (INTEGER_CST, lane,
2785 build_int_cst (unsigned_type_node, 0),
2786 NULL_TREE);
2787 gimple_seq_add_stmt (ilist, g);
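/* Wrap the per-lane initialization (llist[0]) and finalization (llist[1])
   sequences in a loop over the lanes actually used, i.e. roughly

       vf = GOMP_SIMD_VF (simduid);
       for (idx = 0; idx < vf; idx++)
         <llist[i]>;

   emitted into ILIST and DLIST respectively by the code below.  */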
2788 for (int i = 0; i < 2; i++)
2789 if (llist[i])
2790 {
2791 tree vf = create_tmp_var (unsigned_type_node, NULL);
2792 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
2793 gimple_call_set_lhs (g, vf);
2794 gimple_seq *seq = i == 0 ? ilist : dlist;
2795 gimple_seq_add_stmt (seq, g);
2796 tree t = build_int_cst (unsigned_type_node, 0);
2797 g = gimple_build_assign_with_ops (INTEGER_CST, idx, t, NULL_TREE);
2798 gimple_seq_add_stmt (seq, g);
2799 tree body = create_artificial_label (UNKNOWN_LOCATION);
2800 tree header = create_artificial_label (UNKNOWN_LOCATION);
2801 tree end = create_artificial_label (UNKNOWN_LOCATION);
2802 gimple_seq_add_stmt (seq, gimple_build_goto (header));
2803 gimple_seq_add_stmt (seq, gimple_build_label (body));
2804 gimple_seq_add_seq (seq, llist[i]);
2805 t = build_int_cst (unsigned_type_node, 1);
2806 g = gimple_build_assign_with_ops (PLUS_EXPR, idx, idx, t);
2807 gimple_seq_add_stmt (seq, g);
2808 gimple_seq_add_stmt (seq, gimple_build_label (header));
2809 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
2810 gimple_seq_add_stmt (seq, g);
2811 gimple_seq_add_stmt (seq, gimple_build_label (end));
2812 }
2813 }
2814
2815 /* The copyin sequence is not to be executed by the main thread, since
2816 that would result in self-copies. That may not be visible for scalar
2817 assignments, but it certainly is for C++ operator=. */
2818 if (copyin_seq)
2819 {
2820 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2821 0);
2822 x = build2 (NE_EXPR, boolean_type_node, x,
2823 build_int_cst (TREE_TYPE (x), 0));
2824 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2825 gimplify_and_add (x, ilist);
2826 }
2827
2828 /* If any copyin variable is passed by reference, we must ensure the
2829 master thread doesn't modify it before it is copied over in all
2830 threads. Similarly for variables in both firstprivate and
2831 lastprivate clauses we need to ensure the lastprivate copying
2832 happens after firstprivate copying in all threads. */
2833 if (copyin_by_ref || lastprivate_firstprivate)
2834 {
2835 /* Don't add any barrier for #pragma omp simd or
2836 #pragma omp distribute. */
2837 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2838 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
2839 gimplify_and_add (build_omp_barrier (), ilist);
2840 }
2841
2842 /* If max_vf is non-zero, then we can use only a vectorization factor
2843 up to the max_vf we chose. So stick it into the safelen clause. */
2844 if (max_vf)
2845 {
2846 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2847 OMP_CLAUSE_SAFELEN);
2848 if (c == NULL_TREE
2849 || compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
2850 max_vf) == 1)
2851 {
2852 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
2853 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
2854 max_vf);
2855 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
2856 gimple_omp_for_set_clauses (ctx->stmt, c);
2857 }
2858 }
2859 }
2860
2861
2862 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2863 both parallel and workshare constructs. PREDICATE may be NULL if it's
2864 always true. */
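/* When PREDICATE is given, the generated code has the shape (sketch)

       if (<predicate>)   <- e.g. "this thread ran the last iteration"
         {
           <outer var> = <private copy>;   ... one per lastprivate/linear var
         }

   so only the thread that executed the sequentially last iteration (or the
   last section) copies its value back out.  */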
2865
2866 static void
2867 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2868 omp_context *ctx)
2869 {
2870 tree x, c, label = NULL, orig_clauses = clauses;
2871 bool par_clauses = false;
2872 tree simduid = NULL, lastlane = NULL;
2873
2874 /* Early exit if there are no lastprivate or linear clauses. */
2875 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
2876 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
2877 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
2878 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
2879 break;
2880 if (clauses == NULL)
2881 {
2882 /* If this was a workshare clause, see if it had been combined
2883 with its parallel. In that case, look for the clauses on the
2884 parallel statement itself. */
2885 if (is_parallel_ctx (ctx))
2886 return;
2887
2888 ctx = ctx->outer;
2889 if (ctx == NULL || !is_parallel_ctx (ctx))
2890 return;
2891
2892 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2893 OMP_CLAUSE_LASTPRIVATE);
2894 if (clauses == NULL)
2895 return;
2896 par_clauses = true;
2897 }
2898
2899 if (predicate)
2900 {
2901 gimple stmt;
2902 tree label_true, arm1, arm2;
2903
2904 label = create_artificial_label (UNKNOWN_LOCATION);
2905 label_true = create_artificial_label (UNKNOWN_LOCATION);
2906 arm1 = TREE_OPERAND (predicate, 0);
2907 arm2 = TREE_OPERAND (predicate, 1);
2908 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2909 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2910 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2911 label_true, label);
2912 gimple_seq_add_stmt (stmt_list, stmt);
2913 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2914 }
2915
2916 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2917 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
2918 {
2919 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
2920 if (simduid)
2921 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
2922 }
2923
2924 for (c = clauses; c ;)
2925 {
2926 tree var, new_var;
2927 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2928
2929 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
2930 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2931 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
2932 {
2933 var = OMP_CLAUSE_DECL (c);
2934 new_var = lookup_decl (var, ctx);
2935
2936 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
2937 {
2938 tree val = DECL_VALUE_EXPR (new_var);
2939 if (TREE_CODE (val) == ARRAY_REF
2940 && VAR_P (TREE_OPERAND (val, 0))
2941 && lookup_attribute ("omp simd array",
2942 DECL_ATTRIBUTES (TREE_OPERAND (val,
2943 0))))
2944 {
2945 if (lastlane == NULL)
2946 {
2947 lastlane = create_tmp_var (unsigned_type_node, NULL);
2948 gimple g
2949 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
2950 2, simduid,
2951 TREE_OPERAND (val, 1));
2952 gimple_call_set_lhs (g, lastlane);
2953 gimple_seq_add_stmt (stmt_list, g);
2954 }
2955 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
2956 TREE_OPERAND (val, 0), lastlane,
2957 NULL_TREE, NULL_TREE);
2958 }
2959 }
2960
2961 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
2962 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2963 {
2964 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2965 gimple_seq_add_seq (stmt_list,
2966 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2967 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2968 }
2969
2970 x = build_outer_var_ref (var, ctx);
2971 if (is_reference (var))
2972 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2973 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2974 gimplify_and_add (x, stmt_list);
2975 }
2976 c = OMP_CLAUSE_CHAIN (c);
2977 if (c == NULL && !par_clauses)
2978 {
2979 /* If this was a workshare clause, see if it had been combined
2980 with its parallel. In that case, continue looking for the
2981 clauses also on the parallel statement itself. */
2982 if (is_parallel_ctx (ctx))
2983 break;
2984
2985 ctx = ctx->outer;
2986 if (ctx == NULL || !is_parallel_ctx (ctx))
2987 break;
2988
2989 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2990 OMP_CLAUSE_LASTPRIVATE);
2991 par_clauses = true;
2992 }
2993 }
2994
2995 if (label)
2996 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2997 }
2998
2999
3000 /* Generate code to implement the REDUCTION clauses. */
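/* Sketch of the two merge strategies used below (illustrative only):

     - exactly one scalar reduction clause: an atomic update,
           #pragma omp atomic
             shared_var = shared_var <op> private_copy;

     - otherwise: serialize all the merges with the libgomp lock,
           GOMP_atomic_start ();
           shared_var1 = shared_var1 <op1> private_copy1;
           ...
           GOMP_atomic_end ();  */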
3001
3002 static void
3003 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
3004 {
3005 gimple_seq sub_seq = NULL;
3006 gimple stmt;
3007 tree x, c;
3008 int count = 0;
3009
3010 /* SIMD reductions are handled in lower_rec_input_clauses. */
3011 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3012 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
3013 return;
3014
3015 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
3016 update in that case, otherwise use a lock. */
3017 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
3018 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
3019 {
3020 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3021 {
3022 /* Never use OMP_ATOMIC for array reductions. */
3023 count = -1;
3024 break;
3025 }
3026 count++;
3027 }
3028
3029 if (count == 0)
3030 return;
3031
3032 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3033 {
3034 tree var, ref, new_var;
3035 enum tree_code code;
3036 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3037
3038 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
3039 continue;
3040
3041 var = OMP_CLAUSE_DECL (c);
3042 new_var = lookup_decl (var, ctx);
3043 if (is_reference (var))
3044 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3045 ref = build_outer_var_ref (var, ctx);
3046 code = OMP_CLAUSE_REDUCTION_CODE (c);
3047
3048 /* reduction(-:var) sums up the partial results, so it acts
3049 identically to reduction(+:var). */
3050 if (code == MINUS_EXPR)
3051 code = PLUS_EXPR;
3052
3053 if (count == 1)
3054 {
3055 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
3056
3057 addr = save_expr (addr);
3058 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
3059 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
3060 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
3061 gimplify_and_add (x, stmt_seqp);
3062 return;
3063 }
3064
3065 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3066 {
3067 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3068
3069 if (is_reference (var))
3070 ref = build_fold_addr_expr_loc (clause_loc, ref);
3071 SET_DECL_VALUE_EXPR (placeholder, ref);
3072 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
3073 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
3074 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
3075 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3076 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
3077 }
3078 else
3079 {
3080 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3081 ref = build_outer_var_ref (var, ctx);
3082 gimplify_assign (ref, x, &sub_seq);
3083 }
3084 }
3085
3086 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
3087 0);
3088 gimple_seq_add_stmt (stmt_seqp, stmt);
3089
3090 gimple_seq_add_seq (stmt_seqp, sub_seq);
3091
3092 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
3093 0);
3094 gimple_seq_add_stmt (stmt_seqp, stmt);
3095 }
3096
3097
3098 /* Generate code to implement the COPYPRIVATE clauses. */
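/* For "#pragma omp single copyprivate (x)" this fills SLIST with stores of
   x (or of its address, when passed by reference) into the .omp_copy_s
   record, and RLIST with the matching assignments from that record back
   into x; how the two sequences are placed around the single region is
   left to the caller (sketch of the loop below).  */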
3099
3100 static void
3101 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
3102 omp_context *ctx)
3103 {
3104 tree c;
3105
3106 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3107 {
3108 tree var, new_var, ref, x;
3109 bool by_ref;
3110 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3111
3112 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
3113 continue;
3114
3115 var = OMP_CLAUSE_DECL (c);
3116 by_ref = use_pointer_for_field (var, NULL);
3117
3118 ref = build_sender_ref (var, ctx);
3119 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
3120 if (by_ref)
3121 {
3122 x = build_fold_addr_expr_loc (clause_loc, new_var);
3123 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
3124 }
3125 gimplify_assign (ref, x, slist);
3126
3127 ref = build_receiver_ref (var, false, ctx);
3128 if (by_ref)
3129 {
3130 ref = fold_convert_loc (clause_loc,
3131 build_pointer_type (TREE_TYPE (new_var)),
3132 ref);
3133 ref = build_fold_indirect_ref_loc (clause_loc, ref);
3134 }
3135 if (is_reference (var))
3136 {
3137 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
3138 ref = build_simple_mem_ref_loc (clause_loc, ref);
3139 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3140 }
3141 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
3142 gimplify_and_add (x, rlist);
3143 }
3144 }
3145
3146
3147 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
3148 and REDUCTION from the sender (aka parent) side. */
3149
3150 static void
3151 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
3152 omp_context *ctx)
3153 {
3154 tree c;
3155
3156 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3157 {
3158 tree val, ref, x, var;
3159 bool by_ref, do_in = false, do_out = false;
3160 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3161
3162 switch (OMP_CLAUSE_CODE (c))
3163 {
3164 case OMP_CLAUSE_PRIVATE:
3165 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3166 break;
3167 continue;
3168 case OMP_CLAUSE_FIRSTPRIVATE:
3169 case OMP_CLAUSE_COPYIN:
3170 case OMP_CLAUSE_LASTPRIVATE:
3171 case OMP_CLAUSE_REDUCTION:
3172 break;
3173 default:
3174 continue;
3175 }
3176
3177 val = OMP_CLAUSE_DECL (c);
3178 var = lookup_decl_in_outer_ctx (val, ctx);
3179
3180 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
3181 && is_global_var (var))
3182 continue;
3183 if (is_variable_sized (val))
3184 continue;
3185 by_ref = use_pointer_for_field (val, NULL);
3186
3187 switch (OMP_CLAUSE_CODE (c))
3188 {
3189 case OMP_CLAUSE_PRIVATE:
3190 case OMP_CLAUSE_FIRSTPRIVATE:
3191 case OMP_CLAUSE_COPYIN:
3192 do_in = true;
3193 break;
3194
3195 case OMP_CLAUSE_LASTPRIVATE:
3196 if (by_ref || is_reference (val))
3197 {
3198 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3199 continue;
3200 do_in = true;
3201 }
3202 else
3203 {
3204 do_out = true;
3205 if (lang_hooks.decls.omp_private_outer_ref (val))
3206 do_in = true;
3207 }
3208 break;
3209
3210 case OMP_CLAUSE_REDUCTION:
3211 do_in = true;
3212 do_out = !(by_ref || is_reference (val));
3213 break;
3214
3215 default:
3216 gcc_unreachable ();
3217 }
3218
3219 if (do_in)
3220 {
3221 ref = build_sender_ref (val, ctx);
3222 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
3223 gimplify_assign (ref, x, ilist);
3224 if (is_task_ctx (ctx))
3225 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
3226 }
3227
3228 if (do_out)
3229 {
3230 ref = build_sender_ref (val, ctx);
3231 gimplify_assign (var, ref, olist);
3232 }
3233 }
3234 }
3235
3236 /* Generate code to implement SHARED from the sender (aka parent)
3237 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
3238 list things that got automatically shared. */
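/* In effect, for every field F of the communication record whose original
   variable VAR was remapped in the child, this emits (sketch)

       .omp_data_o.F = VAR;        <- or = &VAR when passed by reference

   into ILIST before the region and, for plain by-value fields that are not
   read-only, also

       VAR = .omp_data_o.F;

   into OLIST after it, so updates made inside the region become visible.  */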
3239
3240 static void
3241 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
3242 {
3243 tree var, ovar, nvar, f, x, record_type;
3244
3245 if (ctx->record_type == NULL)
3246 return;
3247
3248 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
3249 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
3250 {
3251 ovar = DECL_ABSTRACT_ORIGIN (f);
3252 nvar = maybe_lookup_decl (ovar, ctx);
3253 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
3254 continue;
3255
3256 /* If CTX is a nested parallel directive, find the immediately
3257 enclosing parallel or workshare construct that contains a
3258 mapping for OVAR. */
3259 var = lookup_decl_in_outer_ctx (ovar, ctx);
3260
3261 if (use_pointer_for_field (ovar, ctx))
3262 {
3263 x = build_sender_ref (ovar, ctx);
3264 var = build_fold_addr_expr (var);
3265 gimplify_assign (x, var, ilist);
3266 }
3267 else
3268 {
3269 x = build_sender_ref (ovar, ctx);
3270 gimplify_assign (x, var, ilist);
3271
3272 if (!TREE_READONLY (var)
3273 /* We don't need to receive a new reference to a result
3274 or parm decl. In fact we may not store to it as we will
3275 invalidate any pending RSO and generate wrong gimple
3276 during inlining. */
3277 && !((TREE_CODE (var) == RESULT_DECL
3278 || TREE_CODE (var) == PARM_DECL)
3279 && DECL_BY_REFERENCE (var)))
3280 {
3281 x = build_sender_ref (ovar, ctx);
3282 gimplify_assign (var, x, olist);
3283 }
3284 }
3285 }
3286 }
3287
3288
3289 /* A convenience function to build an empty GIMPLE_COND with just the
3290 condition. */
3291
3292 static gimple
3293 gimple_build_cond_empty (tree cond)
3294 {
3295 enum tree_code pred_code;
3296 tree lhs, rhs;
3297
3298 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
3299 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
3300 }
3301
3302
3303 /* Build the function calls to GOMP_parallel_start etc to actually
3304 generate the parallel operation. REGION is the parallel region
3305 being expanded. BB is the block where to insert the code. WS_ARGS
3306 will be set if this is a call to a combined parallel+workshare
3307 construct, it contains the list of additional arguments needed by
3308 the workshare construct. */
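/* In outline, the code inserted at the end of BB is (sketch, combined
   parallel+workshare arguments omitted):

       __builtin_GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
       child_fn (&.omp_data_o);
       __builtin_GOMP_parallel_end ();

   where num_threads is 0 to let the runtime decide, or the value derived
   from the num_threads/if clauses as computed below.  */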
3309
3310 static void
3311 expand_parallel_call (struct omp_region *region, basic_block bb,
3312 gimple entry_stmt, vec<tree, va_gc> *ws_args)
3313 {
3314 tree t, t1, t2, val, cond, c, clauses;
3315 gimple_stmt_iterator gsi;
3316 gimple stmt;
3317 enum built_in_function start_ix;
3318 int start_ix2;
3319 location_t clause_loc;
3320 vec<tree, va_gc> *args;
3321
3322 clauses = gimple_omp_parallel_clauses (entry_stmt);
3323
3324 /* Determine what flavor of GOMP_parallel_start we will be
3325 emitting. */
3326 start_ix = BUILT_IN_GOMP_PARALLEL_START;
3327 if (is_combined_parallel (region))
3328 {
3329 switch (region->inner->type)
3330 {
3331 case GIMPLE_OMP_FOR:
3332 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
3333 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
3334 + (region->inner->sched_kind
3335 == OMP_CLAUSE_SCHEDULE_RUNTIME
3336 ? 3 : region->inner->sched_kind));
3337 start_ix = (enum built_in_function)start_ix2;
3338 break;
3339 case GIMPLE_OMP_SECTIONS:
3340 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
3341 break;
3342 default:
3343 gcc_unreachable ();
3344 }
3345 }
3346
3347 /* By default, the value of NUM_THREADS is zero (selected at run time)
3348 and there is no conditional. */
3349 cond = NULL_TREE;
3350 val = build_int_cst (unsigned_type_node, 0);
3351
3352 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3353 if (c)
3354 cond = OMP_CLAUSE_IF_EXPR (c);
3355
3356 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
3357 if (c)
3358 {
3359 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
3360 clause_loc = OMP_CLAUSE_LOCATION (c);
3361 }
3362 else
3363 clause_loc = gimple_location (entry_stmt);
3364
3365 /* Ensure 'val' is of the correct type. */
3366 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
3367
3368 /* If we found the clause 'if (cond)', build either
3369 (cond != 0) or (cond ? val : 1u). */
3370 if (cond)
3371 {
3372 gimple_stmt_iterator gsi;
3373
3374 cond = gimple_boolify (cond);
3375
3376 if (integer_zerop (val))
3377 val = fold_build2_loc (clause_loc,
3378 EQ_EXPR, unsigned_type_node, cond,
3379 build_int_cst (TREE_TYPE (cond), 0));
3380 else
3381 {
3382 basic_block cond_bb, then_bb, else_bb;
3383 edge e, e_then, e_else;
3384 tree tmp_then, tmp_else, tmp_join, tmp_var;
3385
3386 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
3387 if (gimple_in_ssa_p (cfun))
3388 {
3389 tmp_then = make_ssa_name (tmp_var, NULL);
3390 tmp_else = make_ssa_name (tmp_var, NULL);
3391 tmp_join = make_ssa_name (tmp_var, NULL);
3392 }
3393 else
3394 {
3395 tmp_then = tmp_var;
3396 tmp_else = tmp_var;
3397 tmp_join = tmp_var;
3398 }
3399
3400 e = split_block (bb, NULL);
3401 cond_bb = e->src;
3402 bb = e->dest;
3403 remove_edge (e);
3404
3405 then_bb = create_empty_bb (cond_bb);
3406 else_bb = create_empty_bb (then_bb);
3407 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3408 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3409
3410 stmt = gimple_build_cond_empty (cond);
3411 gsi = gsi_start_bb (cond_bb);
3412 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3413
3414 gsi = gsi_start_bb (then_bb);
3415 stmt = gimple_build_assign (tmp_then, val);
3416 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3417
3418 gsi = gsi_start_bb (else_bb);
3419 stmt = gimple_build_assign
3420 (tmp_else, build_int_cst (unsigned_type_node, 1));
3421 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3422
3423 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3424 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3425 if (current_loops)
3426 {
3427 add_bb_to_loop (then_bb, cond_bb->loop_father);
3428 add_bb_to_loop (else_bb, cond_bb->loop_father);
3429 }
3430 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3431 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3432
3433 if (gimple_in_ssa_p (cfun))
3434 {
3435 gimple phi = create_phi_node (tmp_join, bb);
3436 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3437 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3438 }
3439
3440 val = tmp_join;
3441 }
3442
3443 gsi = gsi_start_bb (bb);
3444 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3445 false, GSI_CONTINUE_LINKING);
3446 }
3447
3448 gsi = gsi_last_bb (bb);
3449 t = gimple_omp_parallel_data_arg (entry_stmt);
3450 if (t == NULL)
3451 t1 = null_pointer_node;
3452 else
3453 t1 = build_fold_addr_expr (t);
3454 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3455
3456 vec_alloc (args, 3 + vec_safe_length (ws_args));
3457 args->quick_push (t2);
3458 args->quick_push (t1);
3459 args->quick_push (val);
3460 if (ws_args)
3461 args->splice (*ws_args);
3462
3463 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3464 builtin_decl_explicit (start_ix), args);
3465
3466 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3467 false, GSI_CONTINUE_LINKING);
3468
3469 t = gimple_omp_parallel_data_arg (entry_stmt);
3470 if (t == NULL)
3471 t = null_pointer_node;
3472 else
3473 t = build_fold_addr_expr (t);
3474 t = build_call_expr_loc (gimple_location (entry_stmt),
3475 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3476 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3477 false, GSI_CONTINUE_LINKING);
3478
3479 t = build_call_expr_loc (gimple_location (entry_stmt),
3480 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3481 0);
3482 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3483 false, GSI_CONTINUE_LINKING);
3484 }
3485
3486
3487 /* Build the function call to GOMP_task to actually
3488 generate the task operation. BB is the block where to insert the code. */
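/* The call emitted below is (sketch)

       __builtin_GOMP_task (child_fn, &.omp_data_o, copy_fn,
                            arg_size, arg_align, if_cond, flags);

   where FLAGS is a bitmask built below: bit 0 for "untied", bit 1 for a
   true "final" clause and bit 2 for "mergeable".  */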
3489
3490 static void
3491 expand_task_call (basic_block bb, gimple entry_stmt)
3492 {
3493 tree t, t1, t2, t3, flags, cond, c, c2, clauses;
3494 gimple_stmt_iterator gsi;
3495 location_t loc = gimple_location (entry_stmt);
3496
3497 clauses = gimple_omp_task_clauses (entry_stmt);
3498
3499 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3500 if (c)
3501 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3502 else
3503 cond = boolean_true_node;
3504
3505 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3506 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3507 flags = build_int_cst (unsigned_type_node,
3508 (c ? 1 : 0) + (c2 ? 4 : 0));
3509
3510 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3511 if (c)
3512 {
3513 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3514 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3515 build_int_cst (unsigned_type_node, 2),
3516 build_int_cst (unsigned_type_node, 0));
3517 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3518 }
3519
3520 gsi = gsi_last_bb (bb);
3521 t = gimple_omp_task_data_arg (entry_stmt);
3522 if (t == NULL)
3523 t2 = null_pointer_node;
3524 else
3525 t2 = build_fold_addr_expr_loc (loc, t);
3526 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3527 t = gimple_omp_task_copy_fn (entry_stmt);
3528 if (t == NULL)
3529 t3 = null_pointer_node;
3530 else
3531 t3 = build_fold_addr_expr_loc (loc, t);
3532
3533 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3534 7, t1, t2, t3,
3535 gimple_omp_task_arg_size (entry_stmt),
3536 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3537
3538 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3539 false, GSI_CONTINUE_LINKING);
3540 }
3541
3542
3543 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3544 catch handler and return it. This prevents programs from violating the
3545 structured block semantics with throws. */
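/* The wrapped result is roughly (sketch)

       try { BODY } catch ... { <must-not-throw handler> }

   where the handler is the language's cleanup-protection action (e.g.
   std::terminate for C++) or __builtin_trap as a fallback.  */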
3546
3547 static gimple_seq
3548 maybe_catch_exception (gimple_seq body)
3549 {
3550 gimple g;
3551 tree decl;
3552
3553 if (!flag_exceptions)
3554 return body;
3555
3556 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3557 decl = lang_hooks.eh_protect_cleanup_actions ();
3558 else
3559 decl = builtin_decl_explicit (BUILT_IN_TRAP);
3560
3561 g = gimple_build_eh_must_not_throw (decl);
3562 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3563 GIMPLE_TRY_CATCH);
3564
3565 return gimple_seq_alloc_with_stmt (g);
3566 }
3567
3568 /* Chain all the DECLs in the vector V by their DECL_CHAIN fields. */
3569
3570 static tree
3571 vec2chain (vec<tree, va_gc> *v)
3572 {
3573 tree chain = NULL_TREE, t;
3574 unsigned ix;
3575
3576 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
3577 {
3578 DECL_CHAIN (t) = chain;
3579 chain = t;
3580 }
3581
3582 return chain;
3583 }
3584
3585
3586 /* Remove barriers in REGION->EXIT's block. Note that this is only
3587 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3588 is an implicit barrier, the barrier that a workshare inside the
3589 GIMPLE_OMP_PARALLEL left at the end of the region can now be removed
3590 (by marking its GIMPLE_OMP_RETURN as nowait). */
3591
3592 static void
3593 remove_exit_barrier (struct omp_region *region)
3594 {
3595 gimple_stmt_iterator gsi;
3596 basic_block exit_bb;
3597 edge_iterator ei;
3598 edge e;
3599 gimple stmt;
3600 int any_addressable_vars = -1;
3601
3602 exit_bb = region->exit;
3603
3604 /* If the parallel region doesn't return, we don't have REGION->EXIT
3605 block at all. */
3606 if (! exit_bb)
3607 return;
3608
3609 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3610 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3611 statements that can appear in between are extremely limited -- no
3612 memory operations at all. Here, we allow nothing at all, so the
3613 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3614 gsi = gsi_last_bb (exit_bb);
3615 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3616 gsi_prev (&gsi);
3617 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3618 return;
3619
3620 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3621 {
3622 gsi = gsi_last_bb (e->src);
3623 if (gsi_end_p (gsi))
3624 continue;
3625 stmt = gsi_stmt (gsi);
3626 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3627 && !gimple_omp_return_nowait_p (stmt))
3628 {
3629 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3630 in many cases. If there could be tasks queued, the barrier
3631 might be needed to let the tasks run before a local
3632 variable of the parallel region that the task uses as shared
3633 goes out of scope. The task can be spawned either
3634 from within the current function (this would be easy to check)
3635 or from some function it calls and gets passed an address
3636 of such a variable. */
3637 if (any_addressable_vars < 0)
3638 {
3639 gimple parallel_stmt = last_stmt (region->entry);
3640 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3641 tree local_decls, block, decl;
3642 unsigned ix;
3643
3644 any_addressable_vars = 0;
3645 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3646 if (TREE_ADDRESSABLE (decl))
3647 {
3648 any_addressable_vars = 1;
3649 break;
3650 }
3651 for (block = gimple_block (stmt);
3652 !any_addressable_vars
3653 && block
3654 && TREE_CODE (block) == BLOCK;
3655 block = BLOCK_SUPERCONTEXT (block))
3656 {
3657 for (local_decls = BLOCK_VARS (block);
3658 local_decls;
3659 local_decls = DECL_CHAIN (local_decls))
3660 if (TREE_ADDRESSABLE (local_decls))
3661 {
3662 any_addressable_vars = 1;
3663 break;
3664 }
3665 if (block == gimple_block (parallel_stmt))
3666 break;
3667 }
3668 }
3669 if (!any_addressable_vars)
3670 gimple_omp_return_set_nowait (stmt);
3671 }
3672 }
3673 }
3674
3675 static void
3676 remove_exit_barriers (struct omp_region *region)
3677 {
3678 if (region->type == GIMPLE_OMP_PARALLEL)
3679 remove_exit_barrier (region);
3680
3681 if (region->inner)
3682 {
3683 region = region->inner;
3684 remove_exit_barriers (region);
3685 while (region->next)
3686 {
3687 region = region->next;
3688 remove_exit_barriers (region);
3689 }
3690 }
3691 }
3692
3693 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3694 calls. These can't be declared as const functions, but
3695 within one parallel body they are constant, so they can be
3696 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3697 which are declared const. The same applies to a task body, except
3698 that in an untied task omp_get_thread_num () can change at any task
3699 scheduling point. */
3700
3701 static void
3702 optimize_omp_library_calls (gimple entry_stmt)
3703 {
3704 basic_block bb;
3705 gimple_stmt_iterator gsi;
3706 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3707 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3708 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3709 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
3710 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3711 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3712 OMP_CLAUSE_UNTIED) != NULL);
3713
3714 FOR_EACH_BB (bb)
3715 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3716 {
3717 gimple call = gsi_stmt (gsi);
3718 tree decl;
3719
3720 if (is_gimple_call (call)
3721 && (decl = gimple_call_fndecl (call))
3722 && DECL_EXTERNAL (decl)
3723 && TREE_PUBLIC (decl)
3724 && DECL_INITIAL (decl) == NULL)
3725 {
3726 tree built_in;
3727
3728 if (DECL_NAME (decl) == thr_num_id)
3729 {
3730 /* In #pragma omp task untied omp_get_thread_num () can change
3731 during the execution of the task region. */
3732 if (untied_task)
3733 continue;
3734 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3735 }
3736 else if (DECL_NAME (decl) == num_thr_id)
3737 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3738 else
3739 continue;
3740
3741 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3742 || gimple_call_num_args (call) != 0)
3743 continue;
3744
3745 if (flag_exceptions && !TREE_NOTHROW (decl))
3746 continue;
3747
3748 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3749 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3750 TREE_TYPE (TREE_TYPE (built_in))))
3751 continue;
3752
3753 gimple_call_set_fndecl (call, built_in);
3754 }
3755 }
3756 }
3757
3758 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
3759 regimplified. */
3760
3761 static tree
3762 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
3763 {
3764 tree t = *tp;
3765
3766 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
3767 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
3768 return t;
3769
3770 if (TREE_CODE (t) == ADDR_EXPR)
3771 recompute_tree_invariant_for_addr_expr (t);
3772
3773 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
3774 return NULL_TREE;
3775 }
3776
3777 /* Prepend a TO = FROM assignment before *GSI_P. */
3778
3779 static void
3780 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
3781 {
3782 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
3783 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
3784 true, GSI_SAME_STMT);
3785 gimple stmt = gimple_build_assign (to, from);
3786 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
3787 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
3788 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
3789 {
3790 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
3791 gimple_regimplify_operands (stmt, &gsi);
3792 }
3793 }
3794
3795 /* Expand the OpenMP parallel or task directive starting at REGION. */
3796
3797 static void
3798 expand_omp_taskreg (struct omp_region *region)
3799 {
3800 basic_block entry_bb, exit_bb, new_bb;
3801 struct function *child_cfun;
3802 tree child_fn, block, t;
3803 gimple_stmt_iterator gsi;
3804 gimple entry_stmt, stmt;
3805 edge e;
3806 vec<tree, va_gc> *ws_args;
3807
3808 entry_stmt = last_stmt (region->entry);
3809 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3810 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3811
3812 entry_bb = region->entry;
3813 exit_bb = region->exit;
3814
3815 if (is_combined_parallel (region))
3816 ws_args = region->ws_args;
3817 else
3818 ws_args = NULL;
3819
3820 if (child_cfun->cfg)
3821 {
3822 /* Due to inlining, it may happen that we have already outlined
3823 the region, in which case all we need to do is make the
3824 sub-graph unreachable and emit the parallel call. */
3825 edge entry_succ_e, exit_succ_e;
3826 gimple_stmt_iterator gsi;
3827
3828 entry_succ_e = single_succ_edge (entry_bb);
3829
3830 gsi = gsi_last_bb (entry_bb);
3831 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3832 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3833 gsi_remove (&gsi, true);
3834
3835 new_bb = entry_bb;
3836 if (exit_bb)
3837 {
3838 exit_succ_e = single_succ_edge (exit_bb);
3839 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3840 }
3841 remove_edge_and_dominated_blocks (entry_succ_e);
3842 }
3843 else
3844 {
3845 unsigned srcidx, dstidx, num;
3846
3847 /* If the parallel region needs data sent from the parent
3848 function, then the very first statement (except possible
3849 tree profile counter updates) of the parallel body
3850 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3851 &.OMP_DATA_O is passed as an argument to the child function,
3852 we need to replace it with the argument as seen by the child
3853 function.
3854
3855 In most cases, this will end up being the identity assignment
3856 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3857 a function call that has been inlined, the original PARM_DECL
3858 .OMP_DATA_I may have been converted into a different local
3859 variable, in which case we need to keep the assignment. */
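	 /* As a rough sketch (the .omp_data_* names stand for the
	    artificial decls created during lowering and are shown only
	    for illustration):

	      parent:  .omp_data_o.i = i;
	               <runtime call launching CHILD_FN with &.omp_data_o>
	      child:   .omp_data_i = &.omp_data_o;
	               ... = .omp_data_i->i;

	    where the first statement of the child is the copy that is
	    located and rewritten below.  */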
3860 if (gimple_omp_taskreg_data_arg (entry_stmt))
3861 {
3862 basic_block entry_succ_bb = single_succ (entry_bb);
3863 gimple_stmt_iterator gsi;
3864 tree arg, narg;
3865 gimple parcopy_stmt = NULL;
3866
3867 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3868 {
3869 gimple stmt;
3870
3871 gcc_assert (!gsi_end_p (gsi));
3872 stmt = gsi_stmt (gsi);
3873 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3874 continue;
3875
3876 if (gimple_num_ops (stmt) == 2)
3877 {
3878 tree arg = gimple_assign_rhs1 (stmt);
3879
3880 /* We're ignoring the subcode because we're
3881 effectively doing a STRIP_NOPS. */
3882
3883 if (TREE_CODE (arg) == ADDR_EXPR
3884 && TREE_OPERAND (arg, 0)
3885 == gimple_omp_taskreg_data_arg (entry_stmt))
3886 {
3887 parcopy_stmt = stmt;
3888 break;
3889 }
3890 }
3891 }
3892
3893 gcc_assert (parcopy_stmt != NULL);
3894 arg = DECL_ARGUMENTS (child_fn);
3895
3896 if (!gimple_in_ssa_p (cfun))
3897 {
3898 if (gimple_assign_lhs (parcopy_stmt) == arg)
3899 gsi_remove (&gsi, true);
3900 else
3901 {
3902 /* ?? Is setting the subcode really necessary ?? */
3903 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3904 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3905 }
3906 }
3907 else
3908 {
3909 /* If we are in ssa form, we must load the value from the default
3910 definition of the argument. That should not be defined now,
3911 since the argument is not used uninitialized. */
3912 gcc_assert (ssa_default_def (cfun, arg) == NULL);
3913 narg = make_ssa_name (arg, gimple_build_nop ());
3914 set_ssa_default_def (cfun, arg, narg);
3915 /* ?? Is setting the subcode really necessary ?? */
3916 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3917 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3918 update_stmt (parcopy_stmt);
3919 }
3920 }
3921
3922 /* Declare local variables needed in CHILD_CFUN. */
3923 block = DECL_INITIAL (child_fn);
3924 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3925 /* The gimplifier could record temporaries in the parallel/task block
3926 rather than in the containing function's local_decls chain,
3927 in which case cgraph would miss finalizing them. Do it now. */
3928 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3929 if (TREE_CODE (t) == VAR_DECL
3930 && TREE_STATIC (t)
3931 && !DECL_EXTERNAL (t))
3932 varpool_finalize_decl (t);
3933 DECL_SAVED_TREE (child_fn) = NULL;
3934 /* We'll create a CFG for child_fn, so no gimple body is needed. */
3935 gimple_set_body (child_fn, NULL);
3936 TREE_USED (block) = 1;
3937
3938 /* Reset DECL_CONTEXT on function arguments. */
3939 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3940 DECL_CONTEXT (t) = child_fn;
3941
3942 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3943 so that it can be moved to the child function. */
3944 gsi = gsi_last_bb (entry_bb);
3945 stmt = gsi_stmt (gsi);
3946 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3947 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3948 gsi_remove (&gsi, true);
3949 e = split_block (entry_bb, stmt);
3950 entry_bb = e->dest;
3951 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3952
3953 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3954 if (exit_bb)
3955 {
3956 gsi = gsi_last_bb (exit_bb);
3957 gcc_assert (!gsi_end_p (gsi)
3958 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3959 stmt = gimple_build_return (NULL);
3960 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3961 gsi_remove (&gsi, true);
3962 }
3963
3964 /* Move the parallel region into CHILD_CFUN. */
3965
3966 if (gimple_in_ssa_p (cfun))
3967 {
3968 init_tree_ssa (child_cfun);
3969 init_ssa_operands (child_cfun);
3970 child_cfun->gimple_df->in_ssa_p = true;
3971 block = NULL_TREE;
3972 }
3973 else
3974 block = gimple_block (entry_stmt);
3975
3976 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3977 if (exit_bb)
3978 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3979 /* When the OMP expansion process cannot guarantee an up-to-date
3980 loop tree, arrange for the child function to fix up its loops. */
3981 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
3982 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
3983
3984 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3985 num = vec_safe_length (child_cfun->local_decls);
3986 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3987 {
3988 t = (*child_cfun->local_decls)[srcidx];
3989 if (DECL_CONTEXT (t) == cfun->decl)
3990 continue;
3991 if (srcidx != dstidx)
3992 (*child_cfun->local_decls)[dstidx] = t;
3993 dstidx++;
3994 }
3995 if (dstidx != num)
3996 vec_safe_truncate (child_cfun->local_decls, dstidx);
3997
3998 /* Inform the callgraph about the new function. */
3999 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
4000 cgraph_add_new_function (child_fn, true);
4001
4002 /* Fix the callgraph edges for child_cfun. Those for cfun will be
4003 fixed in a following pass. */
4004 push_cfun (child_cfun);
4005 if (optimize)
4006 optimize_omp_library_calls (entry_stmt);
4007 rebuild_cgraph_edges ();
4008
4009 /* Some EH regions might become dead, see PR34608. If
4010 pass_cleanup_cfg isn't the first pass to happen with the
4011 new child, these dead EH edges might cause problems.
4012 Clean them up now. */
4013 if (flag_exceptions)
4014 {
4015 basic_block bb;
4016 bool changed = false;
4017
4018 FOR_EACH_BB (bb)
4019 changed |= gimple_purge_dead_eh_edges (bb);
4020 if (changed)
4021 cleanup_tree_cfg ();
4022 }
4023 if (gimple_in_ssa_p (cfun))
4024 update_ssa (TODO_update_ssa);
4025 pop_cfun ();
4026 }
4027
4028 /* Emit a library call to launch the children threads. */
4029 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
4030 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
4031 else
4032 expand_task_call (new_bb, entry_stmt);
4033 if (gimple_in_ssa_p (cfun))
4034 update_ssa (TODO_update_ssa_only_virtuals);
4035 }
4036
4037
4038 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
4039 of the combined collapse > 1 loop constructs, generate code like:
4040 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
4041 if (cond3 is <)
4042 adj = STEP3 - 1;
4043 else
4044 adj = STEP3 + 1;
4045 count3 = (adj + N32 - N31) / STEP3;
4046 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
4047 if (cond2 is <)
4048 adj = STEP2 - 1;
4049 else
4050 adj = STEP2 + 1;
4051 count2 = (adj + N22 - N21) / STEP2;
4052 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
4053 if (cond1 is <)
4054 adj = STEP1 - 1;
4055 else
4056 adj = STEP1 + 1;
4057 count1 = (adj + N12 - N11) / STEP1;
4058 count = count1 * count2 * count3;
4059 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
4060 count = 0;
4061 and set ZERO_ITER_BB to that bb. */
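/* As a worked instance of the count formula (purely illustrative):
   for "for (i = 0; i < 10; i += 3)" we have N1 = 0, N2 = 10, STEP = 3
   and cond is <, so adj = STEP - 1 = 2 and
   count = (2 + 10 - 0) / 3 = 4, matching the iterations 0, 3, 6, 9.  */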
4062
4063 /* NOTE: It *could* be better to moosh all of the BBs together,
4064 creating one larger BB with all the computation and the unexpected
4065 jump at the end. I.e.
4066
4067 bool zero3, zero2, zero1, zero;
4068
4069 zero3 = N32 c3 N31;
4070 count3 = (N32 - N31) /[cl] STEP3;
4071 zero2 = N22 c2 N21;
4072 count2 = (N22 - N21) /[cl] STEP2;
4073 zero1 = N12 c1 N11;
4074 count1 = (N12 - N11) /[cl] STEP1;
4075 zero = zero3 || zero2 || zero1;
4076 count = count1 * count2 * count3;
4077 if (__builtin_expect(zero, false)) goto zero_iter_bb;
4078
4079 After all, we expect zero to be false, and thus we expect to have to
4080 evaluate all of the comparison expressions, so short-circuiting
4081 oughtn't be a win. Since the condition isn't protecting a
4082 denominator, we're not concerned about divide-by-zero, so we can
4083 fully evaluate count even if a numerator turned out to be wrong.
4084
4085 It seems like putting this all together would create much better
4086 scheduling opportunities, and less pressure on the chip's branch
4087 predictor. */
4088
4089 static void
4090 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
4091 basic_block &entry_bb, tree *counts,
4092 basic_block &zero_iter_bb, int &first_zero_iter,
4093 basic_block &l2_dom_bb)
4094 {
4095 tree t, type = TREE_TYPE (fd->loop.v);
4096 gimple stmt;
4097 edge e, ne;
4098 int i;
4099
4100 /* Collapsed loops need work for expansion into SSA form. */
4101 gcc_assert (!gimple_in_ssa_p (cfun));
4102
4103 for (i = 0; i < fd->collapse; i++)
4104 {
4105 tree itype = TREE_TYPE (fd->loops[i].v);
4106
4107 if (SSA_VAR_P (fd->loop.n2)
4108 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
4109 fold_convert (itype, fd->loops[i].n1),
4110 fold_convert (itype, fd->loops[i].n2)))
4111 == NULL_TREE || !integer_onep (t)))
4112 {
4113 tree n1, n2;
4114 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
4115 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
4116 true, GSI_SAME_STMT);
4117 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
4118 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
4119 true, GSI_SAME_STMT);
4120 stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
4121 NULL_TREE, NULL_TREE);
4122 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
4123 if (walk_tree (gimple_cond_lhs_ptr (stmt),
4124 expand_omp_regimplify_p, NULL, NULL)
4125 || walk_tree (gimple_cond_rhs_ptr (stmt),
4126 expand_omp_regimplify_p, NULL, NULL))
4127 {
4128 *gsi = gsi_for_stmt (stmt);
4129 gimple_regimplify_operands (stmt, gsi);
4130 }
4131 e = split_block (entry_bb, stmt);
4132 if (zero_iter_bb == NULL)
4133 {
4134 first_zero_iter = i;
4135 zero_iter_bb = create_empty_bb (entry_bb);
4136 if (current_loops)
4137 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
4138 *gsi = gsi_after_labels (zero_iter_bb);
4139 stmt = gimple_build_assign (fd->loop.n2,
4140 build_zero_cst (type));
4141 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
4142 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
4143 entry_bb);
4144 }
4145 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
4146 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
4147 e->flags = EDGE_TRUE_VALUE;
4148 e->probability = REG_BR_PROB_BASE - ne->probability;
4149 if (l2_dom_bb == NULL)
4150 l2_dom_bb = entry_bb;
4151 entry_bb = e->dest;
4152 *gsi = gsi_last_bb (entry_bb);
4153 }
4154
4155 if (POINTER_TYPE_P (itype))
4156 itype = signed_type_for (itype);
4157 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
4158 ? -1 : 1));
4159 t = fold_build2 (PLUS_EXPR, itype,
4160 fold_convert (itype, fd->loops[i].step), t);
4161 t = fold_build2 (PLUS_EXPR, itype, t,
4162 fold_convert (itype, fd->loops[i].n2));
4163 t = fold_build2 (MINUS_EXPR, itype, t,
4164 fold_convert (itype, fd->loops[i].n1));
4165 /* ?? We could probably use CEIL_DIV_EXPR instead of
4166 TRUNC_DIV_EXPR and adjust by hand, unless we can't
4167 generate the same code in the end because generically we
4168 don't know that the values involved must be negative for
4169 GT. ?? */
4170 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
4171 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4172 fold_build1 (NEGATE_EXPR, itype, t),
4173 fold_build1 (NEGATE_EXPR, itype,
4174 fold_convert (itype,
4175 fd->loops[i].step)));
4176 else
4177 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
4178 fold_convert (itype, fd->loops[i].step));
4179 t = fold_convert (type, t);
4180 if (TREE_CODE (t) == INTEGER_CST)
4181 counts[i] = t;
4182 else
4183 {
4184 counts[i] = create_tmp_reg (type, ".count");
4185 expand_omp_build_assign (gsi, counts[i], t);
4186 }
4187 if (SSA_VAR_P (fd->loop.n2))
4188 {
4189 if (i == 0)
4190 t = counts[0];
4191 else
4192 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
4193 expand_omp_build_assign (gsi, fd->loop.n2, t);
4194 }
4195 }
4196 }
4197
4198
4199 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
4200 T = V;
4201 V3 = N31 + (T % count3) * STEP3;
4202 T = T / count3;
4203 V2 = N21 + (T % count2) * STEP2;
4204 T = T / count2;
4205 V1 = N11 + T * STEP1;
4206 if this loop doesn't have an inner loop construct combined with it. */
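/* A worked instance (numbers purely illustrative): with collapse(3),
   count3 = 4 and count2 = 3, the linear index T = 17 decomposes as
     V3 = N31 + (17 % 4) * STEP3,   T = 17 / 4 = 4
     V2 = N21 + (4 % 3) * STEP2,    T = 4 / 3 = 1
     V1 = N11 + 1 * STEP1
   i.e. the (outer, middle, inner) iteration (1, 1, 1), since
   17 = 1 * (3 * 4) + 1 * 4 + 1.  */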
4207
4208 static void
4209 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
4210 tree *counts, tree startvar)
4211 {
4212 int i;
4213 tree type = TREE_TYPE (fd->loop.v);
4214 tree tem = create_tmp_reg (type, ".tem");
4215 gimple stmt = gimple_build_assign (tem, startvar);
4216 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
4217
4218 for (i = fd->collapse - 1; i >= 0; i--)
4219 {
4220 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
4221 itype = vtype;
4222 if (POINTER_TYPE_P (vtype))
4223 itype = signed_type_for (vtype);
4224 if (i != 0)
4225 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
4226 else
4227 t = tem;
4228 t = fold_convert (itype, t);
4229 t = fold_build2 (MULT_EXPR, itype, t,
4230 fold_convert (itype, fd->loops[i].step));
4231 if (POINTER_TYPE_P (vtype))
4232 t = fold_build_pointer_plus (fd->loops[i].n1, t);
4233 else
4234 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
4235 t = force_gimple_operand_gsi (gsi, t,
4236 DECL_P (fd->loops[i].v)
4237 && TREE_ADDRESSABLE (fd->loops[i].v),
4238 NULL_TREE, false,
4239 GSI_CONTINUE_LINKING);
4240 stmt = gimple_build_assign (fd->loops[i].v, t);
4241 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
4242 if (i != 0)
4243 {
4244 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
4245 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
4246 false, GSI_CONTINUE_LINKING);
4247 stmt = gimple_build_assign (tem, t);
4248 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
4249 }
4250 }
4251 }
4252
4253
4254 /* Helper function for expand_omp_for_*. Generate code like:
4255 L10:
4256 V3 += STEP3;
4257 if (V3 cond3 N32) goto BODY_BB; else goto L11;
4258 L11:
4259 V3 = N31;
4260 V2 += STEP2;
4261 if (V2 cond2 N22) goto BODY_BB; else goto L12;
4262 L12:
4263 V2 = N21;
4264 V1 += STEP1;
4265 goto BODY_BB; */
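/* For a collapse(2) sketch over
     for (i = 0; i < 2; i++)
       for (j = 0; j < 3; j++)
   (identifiers purely illustrative), the code above advances J on each
   iteration and, once "J cond N2" fails, resets J and advances I, so
   the body observes (0,0) (0,1) (0,2) (1,0) (1,1) (1,2) within the
   chunk it was handed.  */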
4266
4267 static basic_block
4268 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
4269 basic_block body_bb)
4270 {
4271 basic_block last_bb, bb, collapse_bb = NULL;
4272 int i;
4273 gimple_stmt_iterator gsi;
4274 edge e;
4275 tree t;
4276 gimple stmt;
4277
4278 last_bb = cont_bb;
4279 for (i = fd->collapse - 1; i >= 0; i--)
4280 {
4281 tree vtype = TREE_TYPE (fd->loops[i].v);
4282
4283 bb = create_empty_bb (last_bb);
4284 if (current_loops)
4285 add_bb_to_loop (bb, last_bb->loop_father);
4286 gsi = gsi_start_bb (bb);
4287
4288 if (i < fd->collapse - 1)
4289 {
4290 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
4291 e->probability = REG_BR_PROB_BASE / 8;
4292
4293 t = fd->loops[i + 1].n1;
4294 t = force_gimple_operand_gsi (&gsi, t,
4295 DECL_P (fd->loops[i + 1].v)
4296 && TREE_ADDRESSABLE (fd->loops[i
4297 + 1].v),
4298 NULL_TREE, false,
4299 GSI_CONTINUE_LINKING);
4300 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4301 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4302 }
4303 else
4304 collapse_bb = bb;
4305
4306 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4307
4308 if (POINTER_TYPE_P (vtype))
4309 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
4310 else
4311 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
4312 t = force_gimple_operand_gsi (&gsi, t,
4313 DECL_P (fd->loops[i].v)
4314 && TREE_ADDRESSABLE (fd->loops[i].v),
4315 NULL_TREE, false, GSI_CONTINUE_LINKING);
4316 stmt = gimple_build_assign (fd->loops[i].v, t);
4317 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4318
4319 if (i > 0)
4320 {
4321 t = fd->loops[i].n2;
4322 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4323 false, GSI_CONTINUE_LINKING);
4324 tree v = fd->loops[i].v;
4325 if (DECL_P (v) && TREE_ADDRESSABLE (v))
4326 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
4327 false, GSI_CONTINUE_LINKING);
4328 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
4329 stmt = gimple_build_cond_empty (t);
4330 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4331 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
4332 e->probability = REG_BR_PROB_BASE * 7 / 8;
4333 }
4334 else
4335 make_edge (bb, body_bb, EDGE_FALLTHRU);
4336 last_bb = bb;
4337 }
4338
4339 return collapse_bb;
4340 }
4341
4342
4343 /* A subroutine of expand_omp_for. Generate code for a parallel
4344 loop with any schedule. Given parameters:
4345
4346 for (V = N1; V cond N2; V += STEP) BODY;
4347
4348 where COND is "<" or ">", we generate pseudocode
4349
4350 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
4351 if (more) goto L0; else goto L3;
4352 L0:
4353 V = istart0;
4354 iend = iend0;
4355 L1:
4356 BODY;
4357 V += STEP;
4358 if (V cond iend) goto L1; else goto L2;
4359 L2:
4360 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
4361 L3:
4362
4363 If this is a combined omp parallel loop, instead of the call to
4364 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
4365
4366 For collapsed loops, given parameters:
4367 collapse(3)
4368 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
4369 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
4370 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
4371 BODY;
4372
4373 we generate pseudocode
4374
4375 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
4376 if (cond3 is <)
4377 adj = STEP3 - 1;
4378 else
4379 adj = STEP3 + 1;
4380 count3 = (adj + N32 - N31) / STEP3;
4381 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
4382 if (cond2 is <)
4383 adj = STEP2 - 1;
4384 else
4385 adj = STEP2 + 1;
4386 count2 = (adj + N22 - N21) / STEP2;
4387 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
4388 if (cond1 is <)
4389 adj = STEP1 - 1;
4390 else
4391 adj = STEP1 + 1;
4392 count1 = (adj + N12 - N11) / STEP1;
4393 count = count1 * count2 * count3;
4394 goto Z1;
4395 Z0:
4396 count = 0;
4397 Z1:
4398 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
4399 if (more) goto L0; else goto L3;
4400 L0:
4401 V = istart0;
4402 T = V;
4403 V3 = N31 + (T % count3) * STEP3;
4404 T = T / count3;
4405 V2 = N21 + (T % count2) * STEP2;
4406 T = T / count2;
4407 V1 = N11 + T * STEP1;
4408 iend = iend0;
4409 L1:
4410 BODY;
4411 V += 1;
4412 if (V < iend) goto L10; else goto L2;
4413 L10:
4414 V3 += STEP3;
4415 if (V3 cond3 N32) goto L1; else goto L11;
4416 L11:
4417 V3 = N31;
4418 V2 += STEP2;
4419 if (V2 cond2 N22) goto L1; else goto L12;
4420 L12:
4421 V2 = N21;
4422 V1 += STEP1;
4423 goto L1;
4424 L2:
4425 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
4426 L3:
4427
4428 */
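/* For example (a sketch only), a loop such as

     #pragma omp for schedule(dynamic, CHUNK)
     for (V = N1; V cond N2; V += STEP)
       BODY;

   is expanded through this routine with GOMP_loop_dynamic_start and
   GOMP_loop_dynamic_next as START_FN/NEXT_FN; the guided and runtime
   schedules differ only in which GOMP_loop_* pair is chosen (and in
   whether a chunk argument is passed).  */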
4429
4430 static void
4431 expand_omp_for_generic (struct omp_region *region,
4432 struct omp_for_data *fd,
4433 enum built_in_function start_fn,
4434 enum built_in_function next_fn)
4435 {
4436 tree type, istart0, iend0, iend;
4437 tree t, vmain, vback, bias = NULL_TREE;
4438 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
4439 basic_block l2_bb = NULL, l3_bb = NULL;
4440 gimple_stmt_iterator gsi;
4441 gimple stmt;
4442 bool in_combined_parallel = is_combined_parallel (region);
4443 bool broken_loop = region->cont == NULL;
4444 edge e, ne;
4445 tree *counts = NULL;
4446 int i;
4447
4448 gcc_assert (!broken_loop || !in_combined_parallel);
4449 gcc_assert (fd->iter_type == long_integer_type_node
4450 || !in_combined_parallel);
4451
4452 type = TREE_TYPE (fd->loop.v);
4453 istart0 = create_tmp_var (fd->iter_type, ".istart0");
4454 iend0 = create_tmp_var (fd->iter_type, ".iend0");
4455 TREE_ADDRESSABLE (istart0) = 1;
4456 TREE_ADDRESSABLE (iend0) = 1;
4457
4458 /* See if we need to bias by LLONG_MIN. */
4459 if (fd->iter_type == long_long_unsigned_type_node
4460 && TREE_CODE (type) == INTEGER_TYPE
4461 && !TYPE_UNSIGNED (type))
4462 {
4463 tree n1, n2;
4464
4465 if (fd->loop.cond_code == LT_EXPR)
4466 {
4467 n1 = fd->loop.n1;
4468 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
4469 }
4470 else
4471 {
4472 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
4473 n2 = fd->loop.n1;
4474 }
4475 if (TREE_CODE (n1) != INTEGER_CST
4476 || TREE_CODE (n2) != INTEGER_CST
4477 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
4478 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
4479 }
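  /* When BIAS is set it is TYPE_MIN_VALUE of the signed iteration type
     reinterpreted in the unsigned ITER_TYPE; adding it below maps the
     signed range monotonically onto the unsigned range so the
     GOMP_loop_ull_* functions can be used, and it is subtracted again
     when istart0/iend0 are copied back into the iteration variable.  */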
4480
4481 entry_bb = region->entry;
4482 cont_bb = region->cont;
4483 collapse_bb = NULL;
4484 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4485 gcc_assert (broken_loop
4486 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4487 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4488 l1_bb = single_succ (l0_bb);
4489 if (!broken_loop)
4490 {
4491 l2_bb = create_empty_bb (cont_bb);
4492 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
4493 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4494 }
4495 else
4496 l2_bb = NULL;
4497 l3_bb = BRANCH_EDGE (entry_bb)->dest;
4498 exit_bb = region->exit;
4499
4500 gsi = gsi_last_bb (entry_bb);
4501
4502 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4503 if (fd->collapse > 1)
4504 {
4505 int first_zero_iter = -1;
4506 basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;
4507
4508 counts = XALLOCAVEC (tree, fd->collapse);
4509 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
4510 zero_iter_bb, first_zero_iter,
4511 l2_dom_bb);
4512
4513 if (zero_iter_bb)
4514 {
4515 /* Some counts[i] vars might be uninitialized if
4516 some loop has zero iterations. But the body shouldn't
4517 be executed in that case, so just avoid uninit warnings. */
4518 for (i = first_zero_iter; i < fd->collapse; i++)
4519 if (SSA_VAR_P (counts[i]))
4520 TREE_NO_WARNING (counts[i]) = 1;
4521 gsi_prev (&gsi);
4522 e = split_block (entry_bb, gsi_stmt (gsi));
4523 entry_bb = e->dest;
4524 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
4525 gsi = gsi_last_bb (entry_bb);
4526 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
4527 get_immediate_dominator (CDI_DOMINATORS,
4528 zero_iter_bb));
4529 }
4530 }
4531 if (in_combined_parallel)
4532 {
4533 /* In a combined parallel loop, emit a call to
4534 GOMP_loop_foo_next. */
4535 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4536 build_fold_addr_expr (istart0),
4537 build_fold_addr_expr (iend0));
4538 }
4539 else
4540 {
4541 tree t0, t1, t2, t3, t4;
4542 /* If this is not a combined parallel loop, emit a call to
4543 GOMP_loop_foo_start in ENTRY_BB. */
4544 t4 = build_fold_addr_expr (iend0);
4545 t3 = build_fold_addr_expr (istart0);
4546 t2 = fold_convert (fd->iter_type, fd->loop.step);
4547 t1 = fd->loop.n2;
4548 t0 = fd->loop.n1;
4549 if (POINTER_TYPE_P (TREE_TYPE (t0))
4550 && TYPE_PRECISION (TREE_TYPE (t0))
4551 != TYPE_PRECISION (fd->iter_type))
4552 {
4553 /* Avoid casting pointers to an integer of a different size. */
4554 tree itype = signed_type_for (type);
4555 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
4556 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
4557 }
4558 else
4559 {
4560 t1 = fold_convert (fd->iter_type, t1);
4561 t0 = fold_convert (fd->iter_type, t0);
4562 }
4563 if (bias)
4564 {
4565 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
4566 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
4567 }
4568 if (fd->iter_type == long_integer_type_node)
4569 {
4570 if (fd->chunk_size)
4571 {
4572 t = fold_convert (fd->iter_type, fd->chunk_size);
4573 t = build_call_expr (builtin_decl_explicit (start_fn),
4574 6, t0, t1, t2, t, t3, t4);
4575 }
4576 else
4577 t = build_call_expr (builtin_decl_explicit (start_fn),
4578 5, t0, t1, t2, t3, t4);
4579 }
4580 else
4581 {
4582 tree t5;
4583 tree c_bool_type;
4584 tree bfn_decl;
4585
4586 /* The GOMP_loop_ull_*start functions have an additional boolean
4587 argument: true for < loops and false for > loops.
4588 In Fortran, the C bool type can be different from
4589 boolean_type_node. */
4590 bfn_decl = builtin_decl_explicit (start_fn);
4591 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
4592 t5 = build_int_cst (c_bool_type,
4593 fd->loop.cond_code == LT_EXPR ? 1 : 0);
4594 if (fd->chunk_size)
4595 {
4596 tree bfn_decl = builtin_decl_explicit (start_fn);
4597 t = fold_convert (fd->iter_type, fd->chunk_size);
4598 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
4599 }
4600 else
4601 t = build_call_expr (builtin_decl_explicit (start_fn),
4602 6, t5, t0, t1, t2, t3, t4);
4603 }
4604 }
4605 if (TREE_TYPE (t) != boolean_type_node)
4606 t = fold_build2 (NE_EXPR, boolean_type_node,
4607 t, build_int_cst (TREE_TYPE (t), 0));
4608 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4609 true, GSI_SAME_STMT);
4610 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4611
4612 /* Remove the GIMPLE_OMP_FOR statement. */
4613 gsi_remove (&gsi, true);
4614
4615 /* Iteration setup for sequential loop goes in L0_BB. */
4616 tree startvar = fd->loop.v;
4617 tree endvar = NULL_TREE;
4618
4619 gsi = gsi_start_bb (l0_bb);
4620 t = istart0;
4621 if (bias)
4622 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
4623 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
4624 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
4625 t = fold_convert (TREE_TYPE (startvar), t);
4626 t = force_gimple_operand_gsi (&gsi, t,
4627 DECL_P (startvar)
4628 && TREE_ADDRESSABLE (startvar),
4629 NULL_TREE, false, GSI_CONTINUE_LINKING);
4630 stmt = gimple_build_assign (startvar, t);
4631 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4632
4633 t = iend0;
4634 if (bias)
4635 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
4636 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
4637 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
4638 t = fold_convert (TREE_TYPE (startvar), t);
4639 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4640 false, GSI_CONTINUE_LINKING);
4641 if (endvar)
4642 {
4643 stmt = gimple_build_assign (endvar, iend);
4644 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4645 }
4646 if (fd->collapse > 1)
4647 expand_omp_for_init_vars (fd, &gsi, counts, startvar);
4648
4649 if (!broken_loop)
4650 {
4651 /* Code to control the increment and predicate for the sequential
4652 loop goes in the CONT_BB. */
4653 gsi = gsi_last_bb (cont_bb);
4654 stmt = gsi_stmt (gsi);
4655 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4656 vmain = gimple_omp_continue_control_use (stmt);
4657 vback = gimple_omp_continue_control_def (stmt);
4658
4659 /* OMP4 placeholder: if (!gimple_omp_for_combined_p (fd->for_stmt)). */
4660 if (1)
4661 {
4662 if (POINTER_TYPE_P (type))
4663 t = fold_build_pointer_plus (vmain, fd->loop.step);
4664 else
4665 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4666 t = force_gimple_operand_gsi (&gsi, t,
4667 DECL_P (vback)
4668 && TREE_ADDRESSABLE (vback),
4669 NULL_TREE, true, GSI_SAME_STMT);
4670 stmt = gimple_build_assign (vback, t);
4671 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4672
4673 t = build2 (fd->loop.cond_code, boolean_type_node,
4674 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
4675 iend);
4676 stmt = gimple_build_cond_empty (t);
4677 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4678 }
4679
4680 /* Remove GIMPLE_OMP_CONTINUE. */
4681 gsi_remove (&gsi, true);
4682
4683 if (fd->collapse > 1)
4684 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
4685
4686 /* Emit code to get the next parallel iteration in L2_BB. */
4687 gsi = gsi_start_bb (l2_bb);
4688
4689 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4690 build_fold_addr_expr (istart0),
4691 build_fold_addr_expr (iend0));
4692 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4693 false, GSI_CONTINUE_LINKING);
4694 if (TREE_TYPE (t) != boolean_type_node)
4695 t = fold_build2 (NE_EXPR, boolean_type_node,
4696 t, build_int_cst (TREE_TYPE (t), 0));
4697 stmt = gimple_build_cond_empty (t);
4698 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4699 }
4700
4701 /* Add the loop cleanup function. */
4702 gsi = gsi_last_bb (exit_bb);
4703 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4704 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
4705 else
4706 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
4707 stmt = gimple_build_call (t, 0);
4708 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4709 gsi_remove (&gsi, true);
4710
4711 /* Connect the new blocks. */
4712 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4713 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4714
4715 if (!broken_loop)
4716 {
4717 gimple_seq phis;
4718
4719 e = find_edge (cont_bb, l3_bb);
4720 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4721
4722 phis = phi_nodes (l3_bb);
4723 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4724 {
4725 gimple phi = gsi_stmt (gsi);
4726 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4727 PHI_ARG_DEF_FROM_EDGE (phi, e));
4728 }
4729 remove_edge (e);
4730
4731 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4732 if (current_loops)
4733 add_bb_to_loop (l2_bb, cont_bb->loop_father);
4734 e = find_edge (cont_bb, l1_bb);
4735 /* OMP4 placeholder for gimple_omp_for_combined_p (fd->for_stmt). */
4736 if (0)
4737 ;
4738 else if (fd->collapse > 1)
4739 {
4740 remove_edge (e);
4741 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4742 }
4743 else
4744 e->flags = EDGE_TRUE_VALUE;
4745 if (e)
4746 {
4747 e->probability = REG_BR_PROB_BASE * 7 / 8;
4748 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4749 }
4750 else
4751 {
4752 e = find_edge (cont_bb, l2_bb);
4753 e->flags = EDGE_FALLTHRU;
4754 }
4755 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4756
4757 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4758 recompute_dominator (CDI_DOMINATORS, l2_bb));
4759 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4760 recompute_dominator (CDI_DOMINATORS, l3_bb));
4761 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4762 recompute_dominator (CDI_DOMINATORS, l0_bb));
4763 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4764 recompute_dominator (CDI_DOMINATORS, l1_bb));
4765
4766 struct loop *outer_loop = alloc_loop ();
4767 outer_loop->header = l0_bb;
4768 outer_loop->latch = l2_bb;
4769 add_loop (outer_loop, l0_bb->loop_father);
4770
4771 /* OMP4 placeholder: if (!gimple_omp_for_combined_p (fd->for_stmt)). */
4772 if (1)
4773 {
4774 struct loop *loop = alloc_loop ();
4775 loop->header = l1_bb;
4776 /* The loop may have multiple latches. */
4777 add_loop (loop, outer_loop);
4778 }
4779 }
4780 }
4781
4782
4783 /* A subroutine of expand_omp_for. Generate code for a parallel
4784 loop with static schedule and no specified chunk size. Given
4785 parameters:
4786
4787 for (V = N1; V cond N2; V += STEP) BODY;
4788
4789 where COND is "<" or ">", we generate pseudocode
4790
4791 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
4792 if (cond is <)
4793 adj = STEP - 1;
4794 else
4795 adj = STEP + 1;
4796 if ((__typeof (V)) -1 > 0 && cond is >)
4797 n = -(adj + N2 - N1) / -STEP;
4798 else
4799 n = (adj + N2 - N1) / STEP;
4800 q = n / nthreads;
4801 tt = n % nthreads;
4802 if (threadid < tt) goto L3; else goto L4;
4803 L3:
4804 tt = 0;
4805 q = q + 1;
4806 L4:
4807 s0 = q * threadid + tt;
4808 e0 = s0 + q;
4809 V = s0 * STEP + N1;
4810 if (s0 >= e0) goto L2; else goto L0;
4811 L0:
4812 e = e0 * STEP + N1;
4813 L1:
4814 BODY;
4815 V += STEP;
4816 if (V cond e) goto L1;
4817 L2:
4818 */
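/* A worked instance of the partitioning above (numbers purely
   illustrative): with n = 10 iterations and nthreads = 4 we get
   q = 2 and tt = 2, so threads 0 and 1 take q + 1 = 3 iterations and
   threads 2 and 3 take q = 2:

     thread 0: s0 = 0, e0 = 3
     thread 1: s0 = 3, e0 = 6
     thread 2: s0 = 6, e0 = 8
     thread 3: s0 = 8, e0 = 10  */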
4819
4820 static void
4821 expand_omp_for_static_nochunk (struct omp_region *region,
4822 struct omp_for_data *fd)
4823 {
4824 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
4825 tree type, itype, vmain, vback;
4826 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4827 basic_block body_bb, cont_bb;
4828 basic_block fin_bb;
4829 gimple_stmt_iterator gsi;
4830 gimple stmt;
4831 edge ep;
4832
4833 itype = type = TREE_TYPE (fd->loop.v);
4834 if (POINTER_TYPE_P (type))
4835 itype = signed_type_for (type);
4836
4837 entry_bb = region->entry;
4838 cont_bb = region->cont;
4839 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4840 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4841 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4842 body_bb = single_succ (seq_start_bb);
4843 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4844 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4845 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4846 exit_bb = region->exit;
4847
4848 /* Iteration space partitioning goes in ENTRY_BB. */
4849 gsi = gsi_last_bb (entry_bb);
4850 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4851
4852 t = fold_binary (fd->loop.cond_code, boolean_type_node,
4853 fold_convert (type, fd->loop.n1),
4854 fold_convert (type, fd->loop.n2));
4855 if (TYPE_UNSIGNED (type)
4856 && (t == NULL_TREE || !integer_onep (t)))
4857 {
4858 tree n1, n2;
4859 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
4860 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
4861 true, GSI_SAME_STMT);
4862 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
4863 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
4864 true, GSI_SAME_STMT);
4865 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
4866 NULL_TREE, NULL_TREE);
4867 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4868 if (walk_tree (gimple_cond_lhs_ptr (stmt),
4869 expand_omp_regimplify_p, NULL, NULL)
4870 || walk_tree (gimple_cond_rhs_ptr (stmt),
4871 expand_omp_regimplify_p, NULL, NULL))
4872 {
4873 gsi = gsi_for_stmt (stmt);
4874 gimple_regimplify_operands (stmt, &gsi);
4875 }
4876 ep = split_block (entry_bb, stmt);
4877 ep->flags = EDGE_TRUE_VALUE;
4878 entry_bb = ep->dest;
4879 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
4880 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
4881 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
4882 if (gimple_in_ssa_p (cfun))
4883 {
4884 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
4885 for (gsi = gsi_start_phis (fin_bb);
4886 !gsi_end_p (gsi); gsi_next (&gsi))
4887 {
4888 gimple phi = gsi_stmt (gsi);
4889 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
4890 ep, UNKNOWN_LOCATION);
4891 }
4892 }
4893 gsi = gsi_last_bb (entry_bb);
4894 }
4895
4896 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4897 t = fold_convert (itype, t);
4898 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4899 true, GSI_SAME_STMT);
4900
4901 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4902 t = fold_convert (itype, t);
4903 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4904 true, GSI_SAME_STMT);
4905
4906 fd->loop.n1
4907 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4908 true, NULL_TREE, true, GSI_SAME_STMT);
4909 fd->loop.n2
4910 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4911 true, NULL_TREE, true, GSI_SAME_STMT);
4912 fd->loop.step
4913 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4914 true, NULL_TREE, true, GSI_SAME_STMT);
4915
4916 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4917 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4918 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4919 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4920 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4921 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4922 fold_build1 (NEGATE_EXPR, itype, t),
4923 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4924 else
4925 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4926 t = fold_convert (itype, t);
4927 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4928
4929 q = create_tmp_reg (itype, "q");
4930 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4931 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4932 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4933
4934 tt = create_tmp_reg (itype, "tt");
4935 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4936 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4937 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
4938
4939 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4940 stmt = gimple_build_cond_empty (t);
4941 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4942
4943 second_bb = split_block (entry_bb, stmt)->dest;
4944 gsi = gsi_last_bb (second_bb);
4945 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4946
4947 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4948 GSI_SAME_STMT);
4949 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4950 build_int_cst (itype, 1));
4951 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4952
4953 third_bb = split_block (second_bb, stmt)->dest;
4954 gsi = gsi_last_bb (third_bb);
4955 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4956
4957 t = build2 (MULT_EXPR, itype, q, threadid);
4958 t = build2 (PLUS_EXPR, itype, t, tt);
4959 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4960
4961 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4962 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4963
4964 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4965 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4966
4967 /* Remove the GIMPLE_OMP_FOR statement. */
4968 gsi_remove (&gsi, true);
4969
4970 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4971 gsi = gsi_start_bb (seq_start_bb);
4972
4973 t = fold_convert (itype, s0);
4974 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4975 if (POINTER_TYPE_P (type))
4976 t = fold_build_pointer_plus (fd->loop.n1, t);
4977 else
4978 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4979 t = force_gimple_operand_gsi (&gsi, t,
4980 DECL_P (fd->loop.v)
4981 && TREE_ADDRESSABLE (fd->loop.v),
4982 NULL_TREE, false, GSI_CONTINUE_LINKING);
4983 stmt = gimple_build_assign (fd->loop.v, t);
4984 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4985
4986 t = fold_convert (itype, e0);
4987 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4988 if (POINTER_TYPE_P (type))
4989 t = fold_build_pointer_plus (fd->loop.n1, t);
4990 else
4991 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4992 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4993 false, GSI_CONTINUE_LINKING);
4994
4995 /* The code controlling the sequential loop replaces the
4996 GIMPLE_OMP_CONTINUE. */
4997 gsi = gsi_last_bb (cont_bb);
4998 stmt = gsi_stmt (gsi);
4999 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5000 vmain = gimple_omp_continue_control_use (stmt);
5001 vback = gimple_omp_continue_control_def (stmt);
5002
5003 if (POINTER_TYPE_P (type))
5004 t = fold_build_pointer_plus (vmain, fd->loop.step);
5005 else
5006 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
5007 t = force_gimple_operand_gsi (&gsi, t,
5008 DECL_P (vback) && TREE_ADDRESSABLE (vback),
5009 NULL_TREE, true, GSI_SAME_STMT);
5010 stmt = gimple_build_assign (vback, t);
5011 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5012
5013 t = build2 (fd->loop.cond_code, boolean_type_node,
5014 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e);
5015 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5016
5017 /* Remove the GIMPLE_OMP_CONTINUE statement. */
5018 gsi_remove (&gsi, true);
5019
5020 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
5021 gsi = gsi_last_bb (exit_bb);
5022 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
5023 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
5024 false, GSI_SAME_STMT);
5025 gsi_remove (&gsi, true);
5026
5027 /* Connect all the blocks. */
5028 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
5029 ep->probability = REG_BR_PROB_BASE / 4 * 3;
5030 ep = find_edge (entry_bb, second_bb);
5031 ep->flags = EDGE_TRUE_VALUE;
5032 ep->probability = REG_BR_PROB_BASE / 4;
5033 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
5034 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
5035
5036 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
5037 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
5038
5039 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
5040 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
5041 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
5042 set_immediate_dominator (CDI_DOMINATORS, body_bb,
5043 recompute_dominator (CDI_DOMINATORS, body_bb));
5044 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
5045 recompute_dominator (CDI_DOMINATORS, fin_bb));
5046
5047 struct loop *loop = alloc_loop ();
5048 loop->header = body_bb;
5049 loop->latch = cont_bb;
5050 add_loop (loop, body_bb->loop_father);
5051 }
5052
5053
5054 /* A subroutine of expand_omp_for. Generate code for a parallel
5055 loop with static schedule and a specified chunk size. Given
5056 parameters:
5057
5058 for (V = N1; V cond N2; V += STEP) BODY;
5059
5060 where COND is "<" or ">", we generate pseudocode
5061
5062 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
5063 if (cond is <)
5064 adj = STEP - 1;
5065 else
5066 adj = STEP + 1;
5067 if ((__typeof (V)) -1 > 0 && cond is >)
5068 n = -(adj + N2 - N1) / -STEP;
5069 else
5070 n = (adj + N2 - N1) / STEP;
5071 trip = 0;
5072 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
5073 here so that V is defined
5074 if the loop is not entered
5075 L0:
5076 s0 = (trip * nthreads + threadid) * CHUNK;
5077 e0 = min(s0 + CHUNK, n);
5078 if (s0 < n) goto L1; else goto L4;
5079 L1:
5080 V = s0 * STEP + N1;
5081 e = e0 * STEP + N1;
5082 L2:
5083 BODY;
5084 V += STEP;
5085 if (V cond e) goto L2; else goto L3;
5086 L3:
5087 trip += 1;
5088 goto L0;
5089 L4:
5090 */
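/* A worked instance (numbers purely illustrative): with n = 10,
   nthreads = 2 and CHUNK = 3 the chunks are handed out round-robin:

     thread 0: trip 0 -> [0, 3),  trip 1 -> [6, 9)
     thread 1: trip 0 -> [3, 6),  trip 1 -> [9, 10)

   and each thread keeps looping through L3/L0 until s0 >= n.  */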
5091
5092 static void
5093 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
5094 {
5095 tree n, s0, e0, e, t;
5096 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
5097 tree type, itype, v_main, v_back, v_extra;
5098 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
5099 basic_block trip_update_bb, cont_bb, fin_bb;
5100 gimple_stmt_iterator si;
5101 gimple stmt;
5102 edge se;
5103
5104 itype = type = TREE_TYPE (fd->loop.v);
5105 if (POINTER_TYPE_P (type))
5106 itype = signed_type_for (type);
5107
5108 entry_bb = region->entry;
5109 se = split_block (entry_bb, last_stmt (entry_bb));
5110 entry_bb = se->src;
5111 iter_part_bb = se->dest;
5112 cont_bb = region->cont;
5113 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
5114 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
5115 == FALLTHRU_EDGE (cont_bb)->dest);
5116 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
5117 body_bb = single_succ (seq_start_bb);
5118 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
5119 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5120 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
5121 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
5122 exit_bb = region->exit;
5123
5124 /* Trip and adjustment setup goes in ENTRY_BB. */
5125 si = gsi_last_bb (entry_bb);
5126 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
5127
5128 t = fold_binary (fd->loop.cond_code, boolean_type_node,
5129 fold_convert (type, fd->loop.n1),
5130 fold_convert (type, fd->loop.n2));
5131 if (TYPE_UNSIGNED (type)
5132 && (t == NULL_TREE || !integer_onep (t)))
5133 {
5134 tree n1, n2;
5135 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
5136 n1 = force_gimple_operand_gsi (&si, n1, true, NULL_TREE,
5137 true, GSI_SAME_STMT);
5138 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
5139 n2 = force_gimple_operand_gsi (&si, n2, true, NULL_TREE,
5140 true, GSI_SAME_STMT);
5141 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
5142 NULL_TREE, NULL_TREE);
5143 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5144 if (walk_tree (gimple_cond_lhs_ptr (stmt),
5145 expand_omp_regimplify_p, NULL, NULL)
5146 || walk_tree (gimple_cond_rhs_ptr (stmt),
5147 expand_omp_regimplify_p, NULL, NULL))
5148 {
5149 si = gsi_for_stmt (stmt);
5150 gimple_regimplify_operands (stmt, &si);
5151 }
5152 se = split_block (entry_bb, stmt);
5153 se->flags = EDGE_TRUE_VALUE;
5154 entry_bb = se->dest;
5155 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
5156 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
5157 se->probability = REG_BR_PROB_BASE / 2000 - 1;
5158 if (gimple_in_ssa_p (cfun))
5159 {
5160 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
5161 for (si = gsi_start_phis (fin_bb);
5162 !gsi_end_p (si); gsi_next (&si))
5163 {
5164 gimple phi = gsi_stmt (si);
5165 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
5166 se, UNKNOWN_LOCATION);
5167 }
5168 }
5169 si = gsi_last_bb (entry_bb);
5170 }
5171
5172 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
5173 t = fold_convert (itype, t);
5174 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5175 true, GSI_SAME_STMT);
5176
5177 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
5178 t = fold_convert (itype, t);
5179 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5180 true, GSI_SAME_STMT);
5181
5182 fd->loop.n1
5183 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
5184 true, NULL_TREE, true, GSI_SAME_STMT);
5185 fd->loop.n2
5186 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
5187 true, NULL_TREE, true, GSI_SAME_STMT);
5188 fd->loop.step
5189 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
5190 true, NULL_TREE, true, GSI_SAME_STMT);
5191 fd->chunk_size
5192 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
5193 true, NULL_TREE, true, GSI_SAME_STMT);
5194
5195 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
5196 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
5197 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
5198 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
5199 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
5200 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5201 fold_build1 (NEGATE_EXPR, itype, t),
5202 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
5203 else
5204 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
5205 t = fold_convert (itype, t);
5206 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5207 true, GSI_SAME_STMT);
5208
5209 trip_var = create_tmp_reg (itype, ".trip");
5210 if (gimple_in_ssa_p (cfun))
5211 {
5212 trip_init = make_ssa_name (trip_var, NULL);
5213 trip_main = make_ssa_name (trip_var, NULL);
5214 trip_back = make_ssa_name (trip_var, NULL);
5215 }
5216 else
5217 {
5218 trip_init = trip_var;
5219 trip_main = trip_var;
5220 trip_back = trip_var;
5221 }
5222
5223 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
5224 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5225
5226 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
5227 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
5228 if (POINTER_TYPE_P (type))
5229 t = fold_build_pointer_plus (fd->loop.n1, t);
5230 else
5231 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
5232 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5233 true, GSI_SAME_STMT);
5234
5235 /* Remove the GIMPLE_OMP_FOR. */
5236 gsi_remove (&si, true);
5237
5238 /* Iteration space partitioning goes in ITER_PART_BB. */
5239 si = gsi_last_bb (iter_part_bb);
5240
5241 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
5242 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
5243 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
5244 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5245 false, GSI_CONTINUE_LINKING);
5246
5247 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
5248 t = fold_build2 (MIN_EXPR, itype, t, n);
5249 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5250 false, GSI_CONTINUE_LINKING);
5251
5252 t = build2 (LT_EXPR, boolean_type_node, s0, n);
5253 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
5254
5255 /* Setup code for sequential iteration goes in SEQ_START_BB. */
5256 si = gsi_start_bb (seq_start_bb);
5257
5258 t = fold_convert (itype, s0);
5259 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
5260 if (POINTER_TYPE_P (type))
5261 t = fold_build_pointer_plus (fd->loop.n1, t);
5262 else
5263 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
5264 t = force_gimple_operand_gsi (&si, t,
5265 DECL_P (fd->loop.v)
5266 && TREE_ADDRESSABLE (fd->loop.v),
5267 NULL_TREE, false, GSI_CONTINUE_LINKING);
5268 stmt = gimple_build_assign (fd->loop.v, t);
5269 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
5270
5271 t = fold_convert (itype, e0);
5272 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
5273 if (POINTER_TYPE_P (type))
5274 t = fold_build_pointer_plus (fd->loop.n1, t);
5275 else
5276 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
5277 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5278 false, GSI_CONTINUE_LINKING);
5279
5280 /* The code controlling the sequential loop goes in CONT_BB,
5281 replacing the GIMPLE_OMP_CONTINUE. */
5282 si = gsi_last_bb (cont_bb);
5283 stmt = gsi_stmt (si);
5284 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5285 v_main = gimple_omp_continue_control_use (stmt);
5286 v_back = gimple_omp_continue_control_def (stmt);
5287
5288 if (POINTER_TYPE_P (type))
5289 t = fold_build_pointer_plus (v_main, fd->loop.step);
5290 else
5291 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
5292 if (DECL_P (v_back) && TREE_ADDRESSABLE (v_back))
5293 t = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5294 true, GSI_SAME_STMT);
5295 stmt = gimple_build_assign (v_back, t);
5296 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5297
5298 t = build2 (fd->loop.cond_code, boolean_type_node,
5299 DECL_P (v_back) && TREE_ADDRESSABLE (v_back)
5300 ? t : v_back, e);
5301 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
5302
5303 /* Remove GIMPLE_OMP_CONTINUE. */
5304 gsi_remove (&si, true);
5305
5306 /* Trip update code goes into TRIP_UPDATE_BB. */
5307 si = gsi_start_bb (trip_update_bb);
5308
5309 t = build_int_cst (itype, 1);
5310 t = build2 (PLUS_EXPR, itype, trip_main, t);
5311 stmt = gimple_build_assign (trip_back, t);
5312 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
5313
5314 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
5315 si = gsi_last_bb (exit_bb);
5316 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
5317 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
5318 false, GSI_SAME_STMT);
5319 gsi_remove (&si, true);
5320
5321 /* Connect the new blocks. */
5322 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
5323 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
5324
5325 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
5326 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
5327
5328 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
5329
5330 if (gimple_in_ssa_p (cfun))
5331 {
5332 gimple_stmt_iterator psi;
5333 gimple phi;
5334 edge re, ene;
5335 edge_var_map_vector *head;
5336 edge_var_map *vm;
5337 size_t i;
5338
5339 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
5340 remove arguments of the phi nodes in fin_bb. We need to create
5341 appropriate phi nodes in iter_part_bb instead. */
5342 se = single_pred_edge (fin_bb);
5343 re = single_succ_edge (trip_update_bb);
5344 head = redirect_edge_var_map_vector (re);
5345 ene = single_succ_edge (entry_bb);
5346
5347 psi = gsi_start_phis (fin_bb);
5348 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
5349 gsi_next (&psi), ++i)
5350 {
5351 gimple nphi;
5352 source_location locus;
5353
5354 phi = gsi_stmt (psi);
5355 t = gimple_phi_result (phi);
5356 gcc_assert (t == redirect_edge_var_map_result (vm));
5357 nphi = create_phi_node (t, iter_part_bb);
5358
5359 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
5360 locus = gimple_phi_arg_location_from_edge (phi, se);
5361
5362 /* A special case -- fd->loop.v is not yet computed in
5363 iter_part_bb, so we need to use v_extra instead. */
5364 if (t == fd->loop.v)
5365 t = v_extra;
5366 add_phi_arg (nphi, t, ene, locus);
5367 locus = redirect_edge_var_map_location (vm);
5368 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
5369 }
5370 gcc_assert (!gsi_end_p (psi) && i == head->length ());
5371 redirect_edge_var_map_clear (re);
5372 while (1)
5373 {
5374 psi = gsi_start_phis (fin_bb);
5375 if (gsi_end_p (psi))
5376 break;
5377 remove_phi_node (&psi, false);
5378 }
5379
5380 /* Make phi node for trip. */
5381 phi = create_phi_node (trip_main, iter_part_bb);
5382 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
5383 UNKNOWN_LOCATION);
5384 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
5385 UNKNOWN_LOCATION);
5386 }
5387
5388 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
5389 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
5390 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
5391 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
5392 recompute_dominator (CDI_DOMINATORS, fin_bb));
5393 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
5394 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
5395 set_immediate_dominator (CDI_DOMINATORS, body_bb,
5396 recompute_dominator (CDI_DOMINATORS, body_bb));
5397
5398 struct loop *trip_loop = alloc_loop ();
5399 trip_loop->header = iter_part_bb;
5400 trip_loop->latch = trip_update_bb;
5401 add_loop (trip_loop, iter_part_bb->loop_father);
5402
5403 struct loop *loop = alloc_loop ();
5404 loop->header = body_bb;
5405 loop->latch = cont_bb;
5406 add_loop (loop, trip_loop);
5407 }
5408
5409 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
5410 loop. Given parameters:
5411
5412 for (V = N1; V cond N2; V += STEP) BODY;
5413
5414 where COND is "<" or ">", we generate pseudocode
5415
5416 V = N1;
5417 goto L1;
5418 L0:
5419 BODY;
5420 V += STEP;
5421 L1:
5422 if (V cond N2) goto L0; else goto L2;
5423 L2:
5424
5425 For collapsed loops, given parameters:
5426 collapse(3)
5427 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
5428 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
5429 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
5430 BODY;
5431
5432 we generate pseudocode
5433
5434 if (cond3 is <)
5435 adj = STEP3 - 1;
5436 else
5437 adj = STEP3 + 1;
5438 count3 = (adj + N32 - N31) / STEP3;
5439 if (cond2 is <)
5440 adj = STEP2 - 1;
5441 else
5442 adj = STEP2 + 1;
5443 count2 = (adj + N22 - N21) / STEP2;
5444 if (cond1 is <)
5445 adj = STEP1 - 1;
5446 else
5447 adj = STEP1 + 1;
5448 count1 = (adj + N12 - N11) / STEP1;
5449 count = count1 * count2 * count3;
5450 V = 0;
5451 V1 = N11;
5452 V2 = N21;
5453 V3 = N31;
5454 goto L1;
5455 L0:
5456 BODY;
5457 V += 1;
5458 V3 += STEP3;
5459 V2 += (V3 cond3 N32) ? 0 : STEP2;
5460 V3 = (V3 cond3 N32) ? V3 : N31;
5461 V1 += (V2 cond2 N22) ? 0 : STEP1;
5462 V2 = (V2 cond2 N22) ? V2 : N21;
5463 L1:
5464 if (V < count) goto L0; else goto L2;
5465 L2:
5466
5467 */
5468
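/* As an illustration (a sketch, not literal output of this function), a
   collapse(2) simd loop such as

     #pragma omp simd collapse(2)
     for (i = 0; i < 4; i++)
       for (j = 0; j < 8; j++)
         BODY;

   is linearized with count = count1 * count2 = 32; the generated loop
   runs V from 0 to 31, stepping and wrapping i and j as shown in the
   pseudocode above.  */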
5469 static void
5470 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
5471 {
5472 tree type, t;
5473 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
5474 gimple_stmt_iterator gsi;
5475 gimple stmt;
5476 bool broken_loop = region->cont == NULL;
5477 edge e, ne;
5478 tree *counts = NULL;
5479 int i;
5480 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5481 OMP_CLAUSE_SAFELEN);
5482 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5483 OMP_CLAUSE__SIMDUID_);
5484 tree n2;
5485
5486 type = TREE_TYPE (fd->loop.v);
5487 entry_bb = region->entry;
5488 cont_bb = region->cont;
5489 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5490 gcc_assert (broken_loop
5491 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
5492 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
5493 if (!broken_loop)
5494 {
5495 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
5496 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5497 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
5498 l2_bb = BRANCH_EDGE (entry_bb)->dest;
5499 }
5500 else
5501 {
5502 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
5503 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
5504 l2_bb = single_succ (l1_bb);
5505 }
5506 exit_bb = region->exit;
5507 l2_dom_bb = NULL;
5508
5509 gsi = gsi_last_bb (entry_bb);
5510
5511 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5512 /* Not needed in SSA form right now. */
5513 gcc_assert (!gimple_in_ssa_p (cfun));
5514 if (fd->collapse > 1)
5515 {
5516 int first_zero_iter = -1;
5517 basic_block zero_iter_bb = l2_bb;
5518
5519 counts = XALLOCAVEC (tree, fd->collapse);
5520 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5521 zero_iter_bb, first_zero_iter,
5522 l2_dom_bb);
5523 }
5524 if (l2_dom_bb == NULL)
5525 l2_dom_bb = l1_bb;
5526
5527 n2 = fd->loop.n2;
5528 if (0)
5529 /* Placeholder for gimple_omp_for_combined_into_p() in
5530 the upcoming gomp-4_0-branch merge. */;
5531 else
5532 {
5533 expand_omp_build_assign (&gsi, fd->loop.v,
5534 fold_convert (type, fd->loop.n1));
5535 if (fd->collapse > 1)
5536 for (i = 0; i < fd->collapse; i++)
5537 {
5538 tree itype = TREE_TYPE (fd->loops[i].v);
5539 if (POINTER_TYPE_P (itype))
5540 itype = signed_type_for (itype);
5541 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
5542 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
5543 }
5544 }
5545
5546 /* Remove the GIMPLE_OMP_FOR statement. */
5547 gsi_remove (&gsi, true);
5548
5549 if (!broken_loop)
5550 {
5551 /* Code to control the increment goes in the CONT_BB. */
5552 gsi = gsi_last_bb (cont_bb);
5553 stmt = gsi_stmt (gsi);
5554 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5555
5556 if (POINTER_TYPE_P (type))
5557 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
5558 else
5559 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
5560 expand_omp_build_assign (&gsi, fd->loop.v, t);
5561
5562 if (fd->collapse > 1)
5563 {
5564 i = fd->collapse - 1;
5565 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
5566 {
5567 t = fold_convert (sizetype, fd->loops[i].step);
5568 t = fold_build_pointer_plus (fd->loops[i].v, t);
5569 }
5570 else
5571 {
5572 t = fold_convert (TREE_TYPE (fd->loops[i].v),
5573 fd->loops[i].step);
5574 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
5575 fd->loops[i].v, t);
5576 }
5577 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
5578
5579 for (i = fd->collapse - 1; i > 0; i--)
5580 {
5581 tree itype = TREE_TYPE (fd->loops[i].v);
5582 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
5583 if (POINTER_TYPE_P (itype2))
5584 itype2 = signed_type_for (itype2);
5585 t = build3 (COND_EXPR, itype2,
5586 build2 (fd->loops[i].cond_code, boolean_type_node,
5587 fd->loops[i].v,
5588 fold_convert (itype, fd->loops[i].n2)),
5589 build_int_cst (itype2, 0),
5590 fold_convert (itype2, fd->loops[i - 1].step));
5591 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
5592 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
5593 else
5594 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
5595 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
5596
5597 t = build3 (COND_EXPR, itype,
5598 build2 (fd->loops[i].cond_code, boolean_type_node,
5599 fd->loops[i].v,
5600 fold_convert (itype, fd->loops[i].n2)),
5601 fd->loops[i].v,
5602 fold_convert (itype, fd->loops[i].n1));
5603 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
5604 }
5605 }
5606
5607 /* Remove GIMPLE_OMP_CONTINUE. */
5608 gsi_remove (&gsi, true);
5609 }
5610
5611 /* Emit the condition in L1_BB. */
5612 gsi = gsi_start_bb (l1_bb);
5613
5614 t = fold_convert (type, n2);
5615 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5616 false, GSI_CONTINUE_LINKING);
5617 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
5618 stmt = gimple_build_cond_empty (t);
5619 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5620 if (walk_tree (gimple_cond_lhs_ptr (stmt), expand_omp_regimplify_p,
5621 NULL, NULL)
5622 || walk_tree (gimple_cond_rhs_ptr (stmt), expand_omp_regimplify_p,
5623 NULL, NULL))
5624 {
5625 gsi = gsi_for_stmt (stmt);
5626 gimple_regimplify_operands (stmt, &gsi);
5627 }
5628
5629 /* Remove GIMPLE_OMP_RETURN. */
5630 gsi = gsi_last_bb (exit_bb);
5631 gsi_remove (&gsi, true);
5632
5633 /* Connect the new blocks. */
5634 remove_edge (FALLTHRU_EDGE (entry_bb));
5635
5636 if (!broken_loop)
5637 {
5638 remove_edge (BRANCH_EDGE (entry_bb));
5639 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
5640
5641 e = BRANCH_EDGE (l1_bb);
5642 ne = FALLTHRU_EDGE (l1_bb);
5643 e->flags = EDGE_TRUE_VALUE;
5644 }
5645 else
5646 {
5647 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
5648
5649 ne = single_succ_edge (l1_bb);
5650 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
5651
5652 }
5653 ne->flags = EDGE_FALSE_VALUE;
5654 e->probability = REG_BR_PROB_BASE * 7 / 8;
5655 ne->probability = REG_BR_PROB_BASE / 8;
5656
5657 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
5658 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
5659 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
5660
5661 if (!broken_loop)
5662 {
5663 struct loop *loop = alloc_loop ();
5664 loop->header = l1_bb;
5665 loop->latch = e->dest;
5666 add_loop (loop, l1_bb->loop_father);
5667 if (safelen == NULL_TREE)
5668 loop->safelen = INT_MAX;
5669 else
5670 {
5671 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
5672 if (!host_integerp (safelen, 1)
5673 || (unsigned HOST_WIDE_INT) tree_low_cst (safelen, 1)
5674 > INT_MAX)
5675 loop->safelen = INT_MAX;
5676 else
5677 loop->safelen = tree_low_cst (safelen, 1);
5678 if (loop->safelen == 1)
5679 loop->safelen = 0;
5680 }
5681 if (simduid)
5682 {
5683 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
5684 cfun->has_simduid_loops = true;
5685 }
5686 /* Unless -fno-tree-vectorize was specified, hint that we want to
5687 vectorize the loop. */
5688 if ((flag_tree_vectorize
5689 || !global_options_set.x_flag_tree_vectorize)
5690 && loop->safelen > 1)
5691 {
5692 loop->force_vect = true;
5693 cfun->has_force_vect_loops = true;
5694 }
5695 }
5696 }
5697
5698
5699 /* Expand the OpenMP loop defined by REGION. */
5700
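/* A rough sketch of the dispatch below (not an exhaustive description):
   simd loops go to expand_omp_simd; a non-collapsed schedule(static)
   loop without an ordered clause and with a continue block goes to
   expand_omp_for_static_nochunk or expand_omp_for_static_chunk,
   depending on whether a chunk size was given; everything else goes to
   expand_omp_for_generic, which picks a GOMP_loop_*_start/_next builtin
   pair from the schedule kind, the ordered clause and the iteration
   type.  */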
5701 static void
5702 expand_omp_for (struct omp_region *region)
5703 {
5704 struct omp_for_data fd;
5705 struct omp_for_data_loop *loops;
5706
5707 loops
5708 = (struct omp_for_data_loop *)
5709 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
5710 * sizeof (struct omp_for_data_loop));
5711 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
5712 region->sched_kind = fd.sched_kind;
5713
5714 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
5715 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
5716 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
5717 if (region->cont)
5718 {
5719 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
5720 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
5721 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
5722 }
5723 else
5724 /* If there isn't a continue then this is a degenerate case where
5725 the introduction of abnormal edges during lowering will prevent
5726 original loops from being detected. Fix that up. */
5727 loops_state_set (LOOPS_NEED_FIXUP);
5728
5729 if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_SIMD)
5730 expand_omp_simd (region, &fd);
5731 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
5732 && !fd.have_ordered
5733 && fd.collapse == 1
5734 && region->cont != NULL)
5735 {
5736 if (fd.chunk_size == NULL)
5737 expand_omp_for_static_nochunk (region, &fd);
5738 else
5739 expand_omp_for_static_chunk (region, &fd);
5740 }
5741 else
5742 {
5743 int fn_index, start_ix, next_ix;
5744
5745 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
5746 == GF_OMP_FOR_KIND_FOR);
5747 if (fd.chunk_size == NULL
5748 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
5749 fd.chunk_size = integer_zero_node;
5750 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
5751 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
5752 ? 3 : fd.sched_kind;
5753 fn_index += fd.have_ordered * 4;
5754 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
5755 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
5756 if (fd.iter_type == long_long_unsigned_type_node)
5757 {
5758 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
5759 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
5760 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
5761 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
5762 }
5763 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
5764 (enum built_in_function) next_ix);
5765 }
5766
5767 if (gimple_in_ssa_p (cfun))
5768 update_ssa (TODO_update_ssa_only_virtuals);
5769 }
5770
5771
5772 /* Expand code for an OpenMP sections directive. In pseudo code, we generate
5773
5774 v = GOMP_sections_start (n);
5775 L0:
5776 switch (v)
5777 {
5778 case 0:
5779 goto L2;
5780 case 1:
5781 section 1;
5782 goto L1;
5783 case 2:
5784 ...
5785 case n:
5786 ...
5787 default:
5788 abort ();
5789 }
5790 L1:
5791 v = GOMP_sections_next ();
5792 goto L0;
5793 L2:
5794 reduction;
5795
5796 If this is a combined parallel sections, replace the call to
5797 GOMP_sections_start with call to GOMP_sections_next. */
5798
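/* For example (a sketch at the source level, not the exact GIMPLE),

     #pragma omp sections
     {
       #pragma omp section
         a ();
       #pragma omp section
         b ();
     }

   becomes a loop around the switch above: GOMP_sections_start (2) (or
   GOMP_sections_next () when combined with a parallel) hands each thread
   a section number, case 1 runs a (), case 2 runs b (), and a returned 0
   means there is no more work.  */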
5799 static void
5800 expand_omp_sections (struct omp_region *region)
5801 {
5802 tree t, u, vin = NULL, vmain, vnext, l2;
5803 vec<tree> label_vec;
5804 unsigned len;
5805 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
5806 gimple_stmt_iterator si, switch_si;
5807 gimple sections_stmt, stmt, cont;
5808 edge_iterator ei;
5809 edge e;
5810 struct omp_region *inner;
5811 unsigned i, casei;
5812 bool exit_reachable = region->cont != NULL;
5813
5814 gcc_assert (region->exit != NULL);
5815 entry_bb = region->entry;
5816 l0_bb = single_succ (entry_bb);
5817 l1_bb = region->cont;
5818 l2_bb = region->exit;
5819 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
5820 l2 = gimple_block_label (l2_bb);
5821 else
5822 {
5823 /* This can happen if there are reductions. */
5824 len = EDGE_COUNT (l0_bb->succs);
5825 gcc_assert (len > 0);
5826 e = EDGE_SUCC (l0_bb, len - 1);
5827 si = gsi_last_bb (e->dest);
5828 l2 = NULL_TREE;
5829 if (gsi_end_p (si)
5830 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
5831 l2 = gimple_block_label (e->dest);
5832 else
5833 FOR_EACH_EDGE (e, ei, l0_bb->succs)
5834 {
5835 si = gsi_last_bb (e->dest);
5836 if (gsi_end_p (si)
5837 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
5838 {
5839 l2 = gimple_block_label (e->dest);
5840 break;
5841 }
5842 }
5843 }
5844 if (exit_reachable)
5845 default_bb = create_empty_bb (l1_bb->prev_bb);
5846 else
5847 default_bb = create_empty_bb (l0_bb);
5848
5849 /* We will build a switch() with enough cases for all the
5850 GIMPLE_OMP_SECTION regions, a '0' case to signal that there is no
5851 more work, and a default case to abort if something goes wrong. */
5852 len = EDGE_COUNT (l0_bb->succs);
5853
5854 /* Use vec::quick_push on label_vec throughout, since we know the size
5855 in advance. */
5856 label_vec.create (len);
5857
5858 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
5859 GIMPLE_OMP_SECTIONS statement. */
5860 si = gsi_last_bb (entry_bb);
5861 sections_stmt = gsi_stmt (si);
5862 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
5863 vin = gimple_omp_sections_control (sections_stmt);
5864 if (!is_combined_parallel (region))
5865 {
5866 /* If we are not inside a combined parallel+sections region,
5867 call GOMP_sections_start. */
5868 t = build_int_cst (unsigned_type_node,
5869 exit_reachable ? len - 1 : len);
5870 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
5871 stmt = gimple_build_call (u, 1, t);
5872 }
5873 else
5874 {
5875 /* Otherwise, call GOMP_sections_next. */
5876 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
5877 stmt = gimple_build_call (u, 0);
5878 }
5879 gimple_call_set_lhs (stmt, vin);
5880 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5881 gsi_remove (&si, true);
5882
5883 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
5884 L0_BB. */
5885 switch_si = gsi_last_bb (l0_bb);
5886 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
5887 if (exit_reachable)
5888 {
5889 cont = last_stmt (l1_bb);
5890 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
5891 vmain = gimple_omp_continue_control_use (cont);
5892 vnext = gimple_omp_continue_control_def (cont);
5893 }
5894 else
5895 {
5896 vmain = vin;
5897 vnext = NULL_TREE;
5898 }
5899
5900 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
5901 label_vec.quick_push (t);
5902 i = 1;
5903
5904 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
5905 for (inner = region->inner, casei = 1;
5906 inner;
5907 inner = inner->next, i++, casei++)
5908 {
5909 basic_block s_entry_bb, s_exit_bb;
5910
5911 /* Skip optional reduction region. */
5912 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
5913 {
5914 --i;
5915 --casei;
5916 continue;
5917 }
5918
5919 s_entry_bb = inner->entry;
5920 s_exit_bb = inner->exit;
5921
5922 t = gimple_block_label (s_entry_bb);
5923 u = build_int_cst (unsigned_type_node, casei);
5924 u = build_case_label (u, NULL, t);
5925 label_vec.quick_push (u);
5926
5927 si = gsi_last_bb (s_entry_bb);
5928 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
5929 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
5930 gsi_remove (&si, true);
5931 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
5932
5933 if (s_exit_bb == NULL)
5934 continue;
5935
5936 si = gsi_last_bb (s_exit_bb);
5937 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
5938 gsi_remove (&si, true);
5939
5940 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
5941 }
5942
5943 /* Error handling code goes in DEFAULT_BB. */
5944 t = gimple_block_label (default_bb);
5945 u = build_case_label (NULL, NULL, t);
5946 make_edge (l0_bb, default_bb, 0);
5947 if (current_loops)
5948 add_bb_to_loop (default_bb, current_loops->tree_root);
5949
5950 stmt = gimple_build_switch (vmain, u, label_vec);
5951 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
5952 gsi_remove (&switch_si, true);
5953 label_vec.release ();
5954
5955 si = gsi_start_bb (default_bb);
5956 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
5957 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
5958
5959 if (exit_reachable)
5960 {
5961 tree bfn_decl;
5962
5963 /* Code to get the next section goes in L1_BB. */
5964 si = gsi_last_bb (l1_bb);
5965 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
5966
5967 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
5968 stmt = gimple_build_call (bfn_decl, 0);
5969 gimple_call_set_lhs (stmt, vnext);
5970 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5971 gsi_remove (&si, true);
5972
5973 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
5974 }
5975
5976 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
5977 si = gsi_last_bb (l2_bb);
5978 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
5979 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
5980 else
5981 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
5982 stmt = gimple_build_call (t, 0);
5983 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5984 gsi_remove (&si, true);
5985
5986 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
5987 }
5988
5989
5990 /* Expand code for an OpenMP single directive. We've already expanded
5991 much of the code; here we simply place the GOMP_barrier call. */
5992
5993 static void
5994 expand_omp_single (struct omp_region *region)
5995 {
5996 basic_block entry_bb, exit_bb;
5997 gimple_stmt_iterator si;
5998 bool need_barrier = false;
5999
6000 entry_bb = region->entry;
6001 exit_bb = region->exit;
6002
6003 si = gsi_last_bb (entry_bb);
6004 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
6005 be removed. We need to ensure that the thread that entered the single
6006 does not exit before the data is copied out by the other threads. */
6007 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
6008 OMP_CLAUSE_COPYPRIVATE))
6009 need_barrier = true;
6010 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
6011 gsi_remove (&si, true);
6012 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6013
6014 si = gsi_last_bb (exit_bb);
6015 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
6016 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
6017 false, GSI_SAME_STMT);
6018 gsi_remove (&si, true);
6019 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
6020 }
6021
6022
6023 /* Generic expansion for OpenMP synchronization directives: master,
6024 ordered and critical. All we need to do here is remove the entry
6025 and exit markers for REGION. */
6026
6027 static void
6028 expand_omp_synch (struct omp_region *region)
6029 {
6030 basic_block entry_bb, exit_bb;
6031 gimple_stmt_iterator si;
6032
6033 entry_bb = region->entry;
6034 exit_bb = region->exit;
6035
6036 si = gsi_last_bb (entry_bb);
6037 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
6038 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
6039 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
6040 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
6041 gsi_remove (&si, true);
6042 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6043
6044 if (exit_bb)
6045 {
6046 si = gsi_last_bb (exit_bb);
6047 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
6048 gsi_remove (&si, true);
6049 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
6050 }
6051 }
6052
6053 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
6054 operation as a normal volatile load. */
6055
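/* For example (a sketch, assuming a 4-byte type for which the builtin
   exists),

     #pragma omp atomic read
     v = *p;

   is implemented here roughly as

     v = __atomic_load_4 (p, MEMMODEL_RELAXED);  */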
6056 static bool
6057 expand_omp_atomic_load (basic_block load_bb, tree addr,
6058 tree loaded_val, int index)
6059 {
6060 enum built_in_function tmpbase;
6061 gimple_stmt_iterator gsi;
6062 basic_block store_bb;
6063 location_t loc;
6064 gimple stmt;
6065 tree decl, call, type, itype;
6066
6067 gsi = gsi_last_bb (load_bb);
6068 stmt = gsi_stmt (gsi);
6069 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
6070 loc = gimple_location (stmt);
6071
6072 /* ??? If the target does not implement atomic_load_optab[mode], and mode
6073 is smaller than word size, then expand_atomic_load assumes that the load
6074 is atomic. We could avoid the builtin entirely in this case. */
6075
6076 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
6077 decl = builtin_decl_explicit (tmpbase);
6078 if (decl == NULL_TREE)
6079 return false;
6080
6081 type = TREE_TYPE (loaded_val);
6082 itype = TREE_TYPE (TREE_TYPE (decl));
6083
6084 call = build_call_expr_loc (loc, decl, 2, addr,
6085 build_int_cst (NULL, MEMMODEL_RELAXED));
6086 if (!useless_type_conversion_p (type, itype))
6087 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
6088 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
6089
6090 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
6091 gsi_remove (&gsi, true);
6092
6093 store_bb = single_succ (load_bb);
6094 gsi = gsi_last_bb (store_bb);
6095 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
6096 gsi_remove (&gsi, true);
6097
6098 if (gimple_in_ssa_p (cfun))
6099 update_ssa (TODO_update_ssa_no_phi);
6100
6101 return true;
6102 }
6103
6104 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
6105 operation as a normal volatile store. */
6106
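/* For example (a sketch, assuming a 4-byte type),

     #pragma omp atomic write
     *p = x;

   becomes roughly

     __atomic_store_4 (p, x, MEMMODEL_RELAXED);

   while a form that also needs the previous value is emitted as an
   __atomic_exchange_4 call instead.  */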
6107 static bool
6108 expand_omp_atomic_store (basic_block load_bb, tree addr,
6109 tree loaded_val, tree stored_val, int index)
6110 {
6111 enum built_in_function tmpbase;
6112 gimple_stmt_iterator gsi;
6113 basic_block store_bb = single_succ (load_bb);
6114 location_t loc;
6115 gimple stmt;
6116 tree decl, call, type, itype;
6117 enum machine_mode imode;
6118 bool exchange;
6119
6120 gsi = gsi_last_bb (load_bb);
6121 stmt = gsi_stmt (gsi);
6122 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
6123
6124 /* If the load value is needed, then this isn't a store but an exchange. */
6125 exchange = gimple_omp_atomic_need_value_p (stmt);
6126
6127 gsi = gsi_last_bb (store_bb);
6128 stmt = gsi_stmt (gsi);
6129 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
6130 loc = gimple_location (stmt);
6131
6132 /* ??? If the target does not implement atomic_store_optab[mode], and mode
6133 is smaller than word size, then expand_atomic_store assumes that the store
6134 is atomic. We could avoid the builtin entirely in this case. */
6135
6136 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
6137 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
6138 decl = builtin_decl_explicit (tmpbase);
6139 if (decl == NULL_TREE)
6140 return false;
6141
6142 type = TREE_TYPE (stored_val);
6143
6144 /* Dig out the type of the function's second argument. */
6145 itype = TREE_TYPE (decl);
6146 itype = TYPE_ARG_TYPES (itype);
6147 itype = TREE_CHAIN (itype);
6148 itype = TREE_VALUE (itype);
6149 imode = TYPE_MODE (itype);
6150
6151 if (exchange && !can_atomic_exchange_p (imode, true))
6152 return false;
6153
6154 if (!useless_type_conversion_p (itype, type))
6155 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
6156 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
6157 build_int_cst (NULL, MEMMODEL_RELAXED));
6158 if (exchange)
6159 {
6160 if (!useless_type_conversion_p (type, itype))
6161 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
6162 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
6163 }
6164
6165 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
6166 gsi_remove (&gsi, true);
6167
6168 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
6169 gsi = gsi_last_bb (load_bb);
6170 gsi_remove (&gsi, true);
6171
6172 if (gimple_in_ssa_p (cfun))
6173 update_ssa (TODO_update_ssa_no_phi);
6174
6175 return true;
6176 }
6177
6178 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
6179 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
6180 size of the data type, and thus usable to find the index of the builtin
6181 decl. Returns false if the expression is not of the proper form. */
6182
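/* For example (a sketch, assuming a 4-byte integer type),

     #pragma omp atomic
     *p += x;

   matches the PLUS_EXPR case below and is implemented roughly as

     __atomic_fetch_add_4 (p, x, MEMMODEL_RELAXED);

   (or the __atomic_add_fetch_4 variant when the updated value is
   needed).  */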
6183 static bool
6184 expand_omp_atomic_fetch_op (basic_block load_bb,
6185 tree addr, tree loaded_val,
6186 tree stored_val, int index)
6187 {
6188 enum built_in_function oldbase, newbase, tmpbase;
6189 tree decl, itype, call;
6190 tree lhs, rhs;
6191 basic_block store_bb = single_succ (load_bb);
6192 gimple_stmt_iterator gsi;
6193 gimple stmt;
6194 location_t loc;
6195 enum tree_code code;
6196 bool need_old, need_new;
6197 enum machine_mode imode;
6198
6199 /* We expect to find the following sequences:
6200
6201 load_bb:
6202 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
6203
6204 store_bb:
6205 val = tmp OP something; (or: something OP tmp)
6206 GIMPLE_OMP_STORE (val)
6207
6208 ???FIXME: Allow a more flexible sequence.
6209 Perhaps use data flow to pick the statements.
6210
6211 */
6212
6213 gsi = gsi_after_labels (store_bb);
6214 stmt = gsi_stmt (gsi);
6215 loc = gimple_location (stmt);
6216 if (!is_gimple_assign (stmt))
6217 return false;
6218 gsi_next (&gsi);
6219 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
6220 return false;
6221 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
6222 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
6223 gcc_checking_assert (!need_old || !need_new);
6224
6225 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
6226 return false;
6227
6228 /* Check for one of the supported fetch-op operations. */
6229 code = gimple_assign_rhs_code (stmt);
6230 switch (code)
6231 {
6232 case PLUS_EXPR:
6233 case POINTER_PLUS_EXPR:
6234 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
6235 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
6236 break;
6237 case MINUS_EXPR:
6238 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
6239 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
6240 break;
6241 case BIT_AND_EXPR:
6242 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
6243 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
6244 break;
6245 case BIT_IOR_EXPR:
6246 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
6247 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
6248 break;
6249 case BIT_XOR_EXPR:
6250 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
6251 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
6252 break;
6253 default:
6254 return false;
6255 }
6256
6257 /* Make sure the expression is of the proper form. */
6258 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
6259 rhs = gimple_assign_rhs2 (stmt);
6260 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
6261 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
6262 rhs = gimple_assign_rhs1 (stmt);
6263 else
6264 return false;
6265
6266 tmpbase = ((enum built_in_function)
6267 ((need_new ? newbase : oldbase) + index + 1));
6268 decl = builtin_decl_explicit (tmpbase);
6269 if (decl == NULL_TREE)
6270 return false;
6271 itype = TREE_TYPE (TREE_TYPE (decl));
6272 imode = TYPE_MODE (itype);
6273
6274 /* We could test all of the various optabs involved, but the fact of the
6275 matter is that (with the exception of i486 vs i586 and xadd) all targets
6276 that support any atomic operation optab also implement compare-and-swap.
6277 Let optabs.c take care of expanding any compare-and-swap loop. */
6278 if (!can_compare_and_swap_p (imode, true))
6279 return false;
6280
6281 gsi = gsi_last_bb (load_bb);
6282 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
6283
6284 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
6285 It only requires that the operation happen atomically. Thus we can
6286 use the RELAXED memory model. */
6287 call = build_call_expr_loc (loc, decl, 3, addr,
6288 fold_convert_loc (loc, itype, rhs),
6289 build_int_cst (NULL, MEMMODEL_RELAXED));
6290
6291 if (need_old || need_new)
6292 {
6293 lhs = need_old ? loaded_val : stored_val;
6294 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
6295 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
6296 }
6297 else
6298 call = fold_convert_loc (loc, void_type_node, call);
6299 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
6300 gsi_remove (&gsi, true);
6301
6302 gsi = gsi_last_bb (store_bb);
6303 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
6304 gsi_remove (&gsi, true);
6305 gsi = gsi_last_bb (store_bb);
6306 gsi_remove (&gsi, true);
6307
6308 if (gimple_in_ssa_p (cfun))
6309 update_ssa (TODO_update_ssa_no_phi);
6310
6311 return true;
6312 }
6313
6314 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
6315
6316 oldval = *addr;
6317 repeat:
6318 newval = rhs; // with oldval replacing *addr in rhs
6319 curval = __sync_val_compare_and_swap (addr, oldval, newval);
6320 if (curval != oldval)
6321 { oldval = curval; goto repeat; }
6322
6323 INDEX is log2 of the size of the data type, and thus usable to find the
6324 index of the builtin decl. */
6325
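/* For illustration (a sketch, assuming a 4-byte float), an update such as

     #pragma omp atomic
     *p *= 2.0f;

   has no fetch-op builtin, so it is expanded into the loop above,
   using __sync_val_compare_and_swap_4 on a view-converted integer
   image of the float value.  */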
6326 static bool
6327 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
6328 tree addr, tree loaded_val, tree stored_val,
6329 int index)
6330 {
6331 tree loadedi, storedi, initial, new_storedi, old_vali;
6332 tree type, itype, cmpxchg, iaddr;
6333 gimple_stmt_iterator si;
6334 basic_block loop_header = single_succ (load_bb);
6335 gimple phi, stmt;
6336 edge e;
6337 enum built_in_function fncode;
6338
6339 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
6340 order to use the RELAXED memory model effectively. */
6341 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
6342 + index + 1);
6343 cmpxchg = builtin_decl_explicit (fncode);
6344 if (cmpxchg == NULL_TREE)
6345 return false;
6346 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
6347 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
6348
6349 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
6350 return false;
6351
6352 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
6353 si = gsi_last_bb (load_bb);
6354 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
6355
6356 /* For floating-point values, we'll need to view-convert them to integers
6357 so that we can perform the atomic compare and swap. Simplify the
6358 following code by always setting up the "i"ntegral variables. */
6359 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
6360 {
6361 tree iaddr_val;
6362
6363 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
6364 true), NULL);
6365 iaddr_val
6366 = force_gimple_operand_gsi (&si,
6367 fold_convert (TREE_TYPE (iaddr), addr),
6368 false, NULL_TREE, true, GSI_SAME_STMT);
6369 stmt = gimple_build_assign (iaddr, iaddr_val);
6370 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6371 loadedi = create_tmp_var (itype, NULL);
6372 if (gimple_in_ssa_p (cfun))
6373 loadedi = make_ssa_name (loadedi, NULL);
6374 }
6375 else
6376 {
6377 iaddr = addr;
6378 loadedi = loaded_val;
6379 }
6380
6381 initial
6382 = force_gimple_operand_gsi (&si,
6383 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
6384 iaddr,
6385 build_int_cst (TREE_TYPE (iaddr), 0)),
6386 true, NULL_TREE, true, GSI_SAME_STMT);
6387
6388 /* Move the value to the LOADEDI temporary. */
6389 if (gimple_in_ssa_p (cfun))
6390 {
6391 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
6392 phi = create_phi_node (loadedi, loop_header);
6393 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
6394 initial);
6395 }
6396 else
6397 gsi_insert_before (&si,
6398 gimple_build_assign (loadedi, initial),
6399 GSI_SAME_STMT);
6400 if (loadedi != loaded_val)
6401 {
6402 gimple_stmt_iterator gsi2;
6403 tree x;
6404
6405 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
6406 gsi2 = gsi_start_bb (loop_header);
6407 if (gimple_in_ssa_p (cfun))
6408 {
6409 gimple stmt;
6410 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
6411 true, GSI_SAME_STMT);
6412 stmt = gimple_build_assign (loaded_val, x);
6413 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
6414 }
6415 else
6416 {
6417 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
6418 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
6419 true, GSI_SAME_STMT);
6420 }
6421 }
6422 gsi_remove (&si, true);
6423
6424 si = gsi_last_bb (store_bb);
6425 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
6426
6427 if (iaddr == addr)
6428 storedi = stored_val;
6429 else
6430 storedi =
6431 force_gimple_operand_gsi (&si,
6432 build1 (VIEW_CONVERT_EXPR, itype,
6433 stored_val), true, NULL_TREE, true,
6434 GSI_SAME_STMT);
6435
6436 /* Build the compare&swap statement. */
6437 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
6438 new_storedi = force_gimple_operand_gsi (&si,
6439 fold_convert (TREE_TYPE (loadedi),
6440 new_storedi),
6441 true, NULL_TREE,
6442 true, GSI_SAME_STMT);
6443
6444 if (gimple_in_ssa_p (cfun))
6445 old_vali = loadedi;
6446 else
6447 {
6448 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
6449 stmt = gimple_build_assign (old_vali, loadedi);
6450 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6451
6452 stmt = gimple_build_assign (loadedi, new_storedi);
6453 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6454 }
6455
6456 /* Note that we always perform the comparison as an integer, even for
6457 floating point. This allows the atomic operation to properly
6458 succeed even with NaNs and -0.0. */
6459 stmt = gimple_build_cond_empty
6460 (build2 (NE_EXPR, boolean_type_node,
6461 new_storedi, old_vali));
6462 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6463
6464 /* Update cfg. */
6465 e = single_succ_edge (store_bb);
6466 e->flags &= ~EDGE_FALLTHRU;
6467 e->flags |= EDGE_FALSE_VALUE;
6468
6469 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
6470
6471 /* Copy the new value to loadedi (we already did that before the condition
6472 if we are not in SSA). */
6473 if (gimple_in_ssa_p (cfun))
6474 {
6475 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
6476 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
6477 }
6478
6479 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
6480 gsi_remove (&si, true);
6481
6482 struct loop *loop = alloc_loop ();
6483 loop->header = loop_header;
6484 loop->latch = store_bb;
6485 add_loop (loop, loop_header->loop_father);
6486
6487 if (gimple_in_ssa_p (cfun))
6488 update_ssa (TODO_update_ssa_no_phi);
6489
6490 return true;
6491 }
6492
6493 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
6494
6495 GOMP_atomic_start ();
6496 *addr = rhs;
6497 GOMP_atomic_end ();
6498
6499 The result is not globally atomic, but works so long as all parallel
6500 references are within #pragma omp atomic directives. According to
6501 responses received from omp@openmp.org, this appears to be within the
6502 spec, which makes sense, since that's how several other compilers
6503 handle this situation as well.
6504 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
6505 expanding. STORED_VAL is the operand of the matching
6506 GIMPLE_OMP_ATOMIC_STORE.
6507
6508 We replace
6509 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
6510 loaded_val = *addr;
6511
6512 and replace
6513 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
6514 *addr = stored_val;
6515 */
6516
6517 static bool
6518 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
6519 tree addr, tree loaded_val, tree stored_val)
6520 {
6521 gimple_stmt_iterator si;
6522 gimple stmt;
6523 tree t;
6524
6525 si = gsi_last_bb (load_bb);
6526 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
6527
6528 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
6529 t = build_call_expr (t, 0);
6530 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
6531
6532 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
6533 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6534 gsi_remove (&si, true);
6535
6536 si = gsi_last_bb (store_bb);
6537 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
6538
6539 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
6540 stored_val);
6541 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6542
6543 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
6544 t = build_call_expr (t, 0);
6545 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
6546 gsi_remove (&si, true);
6547
6548 if (gimple_in_ssa_p (cfun))
6549 update_ssa (TODO_update_ssa_no_phi);
6550 return true;
6551 }
6552
6553 /* Expand a GIMPLE_OMP_ATOMIC statement. We try to expand it
6554 using expand_omp_atomic_fetch_op. If that fails, we try to
6555 call expand_omp_atomic_pipeline, and if that fails too, the
6556 ultimate fallback is wrapping the operation in a mutex
6557 (expand_omp_atomic_mutex). REGION is the atomic region built
6558 by build_omp_regions_1(). */
6559
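/* For illustration (a sketch of the size check below): a naturally
   aligned 4-byte int has TYPE_SIZE_UNIT 4, so index = exact_log2 (4) = 2
   and the _4 __atomic/__sync entry points are tried; a 24-byte type
   gives exact_log2 (24) == -1 and goes straight to the mutex
   fallback.  */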
6560 static void
6561 expand_omp_atomic (struct omp_region *region)
6562 {
6563 basic_block load_bb = region->entry, store_bb = region->exit;
6564 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
6565 tree loaded_val = gimple_omp_atomic_load_lhs (load);
6566 tree addr = gimple_omp_atomic_load_rhs (load);
6567 tree stored_val = gimple_omp_atomic_store_val (store);
6568 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
6569 HOST_WIDE_INT index;
6570
6571 /* Make sure the type is one of the supported sizes. */
6572 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
6573 index = exact_log2 (index);
6574 if (index >= 0 && index <= 4)
6575 {
6576 unsigned int align = TYPE_ALIGN_UNIT (type);
6577
6578 /* __sync builtins require strict data alignment. */
6579 if (exact_log2 (align) >= index)
6580 {
6581 /* Atomic load. */
6582 if (loaded_val == stored_val
6583 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
6584 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
6585 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
6586 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
6587 return;
6588
6589 /* Atomic store. */
6590 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
6591 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
6592 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
6593 && store_bb == single_succ (load_bb)
6594 && first_stmt (store_bb) == store
6595 && expand_omp_atomic_store (load_bb, addr, loaded_val,
6596 stored_val, index))
6597 return;
6598
6599 /* When possible, use specialized atomic update functions. */
6600 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
6601 && store_bb == single_succ (load_bb)
6602 && expand_omp_atomic_fetch_op (load_bb, addr,
6603 loaded_val, stored_val, index))
6604 return;
6605
6606 /* If we don't have specialized __sync builtins, try and implement
6607 as a compare and swap loop. */
6608 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
6609 loaded_val, stored_val, index))
6610 return;
6611 }
6612 }
6613
6614 /* The ultimate fallback is wrapping the operation in a mutex. */
6615 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
6616 }
6617
6618
6619 /* Expand the parallel region tree rooted at REGION. Expansion
6620 proceeds in depth-first order. Innermost regions are expanded
6621 first. This way, parallel regions that require a new function to
6622 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
6623 internal dependencies in their body. */
6624
6625 static void
6626 expand_omp (struct omp_region *region)
6627 {
6628 while (region)
6629 {
6630 location_t saved_location;
6631
6632 /* First, determine whether this is a combined parallel+workshare
6633 region. */
6634 if (region->type == GIMPLE_OMP_PARALLEL)
6635 determine_parallel_type (region);
6636
6637 if (region->inner)
6638 expand_omp (region->inner);
6639
6640 saved_location = input_location;
6641 if (gimple_has_location (last_stmt (region->entry)))
6642 input_location = gimple_location (last_stmt (region->entry));
6643
6644 switch (region->type)
6645 {
6646 case GIMPLE_OMP_PARALLEL:
6647 case GIMPLE_OMP_TASK:
6648 expand_omp_taskreg (region);
6649 break;
6650
6651 case GIMPLE_OMP_FOR:
6652 expand_omp_for (region);
6653 break;
6654
6655 case GIMPLE_OMP_SECTIONS:
6656 expand_omp_sections (region);
6657 break;
6658
6659 case GIMPLE_OMP_SECTION:
6660 /* Individual omp sections are handled together with their
6661 parent GIMPLE_OMP_SECTIONS region. */
6662 break;
6663
6664 case GIMPLE_OMP_SINGLE:
6665 expand_omp_single (region);
6666 break;
6667
6668 case GIMPLE_OMP_MASTER:
6669 case GIMPLE_OMP_ORDERED:
6670 case GIMPLE_OMP_CRITICAL:
6671 expand_omp_synch (region);
6672 break;
6673
6674 case GIMPLE_OMP_ATOMIC_LOAD:
6675 expand_omp_atomic (region);
6676 break;
6677
6678 default:
6679 gcc_unreachable ();
6680 }
6681
6682 input_location = saved_location;
6683 region = region->next;
6684 }
6685 }
6686
6687
6688 /* Helper for build_omp_regions. Scan the dominator tree starting at
6689 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
6690 true, the function ends once a single tree is built (otherwise, a whole
6691 forest of OMP constructs may be built). */
6692
6693 static void
6694 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
6695 bool single_tree)
6696 {
6697 gimple_stmt_iterator gsi;
6698 gimple stmt;
6699 basic_block son;
6700
6701 gsi = gsi_last_bb (bb);
6702 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
6703 {
6704 struct omp_region *region;
6705 enum gimple_code code;
6706
6707 stmt = gsi_stmt (gsi);
6708 code = gimple_code (stmt);
6709 if (code == GIMPLE_OMP_RETURN)
6710 {
6711 /* STMT is the return point out of region PARENT. Mark it
6712 as the exit point and make PARENT the immediately
6713 enclosing region. */
6714 gcc_assert (parent);
6715 region = parent;
6716 region->exit = bb;
6717 parent = parent->outer;
6718 }
6719 else if (code == GIMPLE_OMP_ATOMIC_STORE)
6720 {
6721 /* GIMPLE_OMP_ATOMIC_STORE is analogous to
6722 GIMPLE_OMP_RETURN, but matches with
6723 GIMPLE_OMP_ATOMIC_LOAD. */
6724 gcc_assert (parent);
6725 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
6726 region = parent;
6727 region->exit = bb;
6728 parent = parent->outer;
6729 }
6730
6731 else if (code == GIMPLE_OMP_CONTINUE)
6732 {
6733 gcc_assert (parent);
6734 parent->cont = bb;
6735 }
6736 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
6737 {
6738 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
6739 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
6740 ;
6741 }
6742 else
6743 {
6744 /* Otherwise, this directive becomes the parent for a new
6745 region. */
6746 region = new_omp_region (bb, code, parent);
6747 parent = region;
6748 }
6749 }
6750
6751 if (single_tree && !parent)
6752 return;
6753
6754 for (son = first_dom_son (CDI_DOMINATORS, bb);
6755 son;
6756 son = next_dom_son (CDI_DOMINATORS, son))
6757 build_omp_regions_1 (son, parent, single_tree);
6758 }
6759
6760 /* Builds the tree of OMP regions rooted at ROOT, storing it to
6761 root_omp_region. */
6762
6763 static void
6764 build_omp_regions_root (basic_block root)
6765 {
6766 gcc_assert (root_omp_region == NULL);
6767 build_omp_regions_1 (root, NULL, true);
6768 gcc_assert (root_omp_region != NULL);
6769 }
6770
6771 /* Expands the OMP construct (and its subconstructs) starting in HEAD. */
6772
6773 void
6774 omp_expand_local (basic_block head)
6775 {
6776 build_omp_regions_root (head);
6777 if (dump_file && (dump_flags & TDF_DETAILS))
6778 {
6779 fprintf (dump_file, "\nOMP region tree\n\n");
6780 dump_omp_region (dump_file, root_omp_region, 0);
6781 fprintf (dump_file, "\n");
6782 }
6783
6784 remove_exit_barriers (root_omp_region);
6785 expand_omp (root_omp_region);
6786
6787 free_omp_regions ();
6788 }
6789
6790 /* Scan the CFG and build a tree of OMP regions. The root of the OMP
6791 region tree is stored in root_omp_region. */
6792
6793 static void
6794 build_omp_regions (void)
6795 {
6796 gcc_assert (root_omp_region == NULL);
6797 calculate_dominance_info (CDI_DOMINATORS);
6798 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
6799 }
6800
6801 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
6802
6803 static unsigned int
6804 execute_expand_omp (void)
6805 {
6806 build_omp_regions ();
6807
6808 if (!root_omp_region)
6809 return 0;
6810
6811 if (dump_file)
6812 {
6813 fprintf (dump_file, "\nOMP region tree\n\n");
6814 dump_omp_region (dump_file, root_omp_region, 0);
6815 fprintf (dump_file, "\n");
6816 }
6817
6818 remove_exit_barriers (root_omp_region);
6819
6820 expand_omp (root_omp_region);
6821
6822 cleanup_tree_cfg ();
6823
6824 free_omp_regions ();
6825
6826 return 0;
6827 }
6828
6829 /* OMP expansion -- the default pass, run before creation of SSA form. */
6830
6831 static bool
6832 gate_expand_omp (void)
6833 {
6834 return (flag_openmp != 0 && !seen_error ());
6835 }
6836
6837 namespace {
6838
6839 const pass_data pass_data_expand_omp =
6840 {
6841 GIMPLE_PASS, /* type */
6842 "ompexp", /* name */
6843 OPTGROUP_NONE, /* optinfo_flags */
6844 true, /* has_gate */
6845 true, /* has_execute */
6846 TV_NONE, /* tv_id */
6847 PROP_gimple_any, /* properties_required */
6848 0, /* properties_provided */
6849 0, /* properties_destroyed */
6850 0, /* todo_flags_start */
6851 0, /* todo_flags_finish */
6852 };
6853
6854 class pass_expand_omp : public gimple_opt_pass
6855 {
6856 public:
6857 pass_expand_omp (gcc::context *ctxt)
6858 : gimple_opt_pass (pass_data_expand_omp, ctxt)
6859 {}
6860
6861 /* opt_pass methods: */
6862 bool gate () { return gate_expand_omp (); }
6863 unsigned int execute () { return execute_expand_omp (); }
6864
6865 }; // class pass_expand_omp
6866
6867 } // anon namespace
6868
6869 gimple_opt_pass *
6870 make_pass_expand_omp (gcc::context *ctxt)
6871 {
6872 return new pass_expand_omp (ctxt);
6873 }
6874 \f
6875 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
6876
6877 /* Lower the OpenMP sections directive in the current statement in GSI_P.
6878 CTX is the enclosing OMP context for the current statement. */
6879
6880 static void
6881 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6882 {
6883 tree block, control;
6884 gimple_stmt_iterator tgsi;
6885 gimple stmt, new_stmt, bind, t;
6886 gimple_seq ilist, dlist, olist, new_body;
6887 struct gimplify_ctx gctx;
6888
6889 stmt = gsi_stmt (*gsi_p);
6890
6891 push_gimplify_context (&gctx);
6892
6893 dlist = NULL;
6894 ilist = NULL;
6895 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
6896 &ilist, &dlist, ctx);
6897
6898 new_body = gimple_omp_body (stmt);
6899 gimple_omp_set_body (stmt, NULL);
6900 tgsi = gsi_start (new_body);
6901 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
6902 {
6903 omp_context *sctx;
6904 gimple sec_start;
6905
6906 sec_start = gsi_stmt (tgsi);
6907 sctx = maybe_lookup_ctx (sec_start);
6908 gcc_assert (sctx);
6909
6910 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
6911 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
6912 GSI_CONTINUE_LINKING);
6913 gimple_omp_set_body (sec_start, NULL);
6914
6915 if (gsi_one_before_end_p (tgsi))
6916 {
6917 gimple_seq l = NULL;
6918 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
6919 &l, ctx);
6920 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
6921 gimple_omp_section_set_last (sec_start);
6922 }
6923
6924 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
6925 GSI_CONTINUE_LINKING);
6926 }
6927
6928 block = make_node (BLOCK);
6929 bind = gimple_build_bind (NULL, new_body, block);
6930
6931 olist = NULL;
6932 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
6933
6934 block = make_node (BLOCK);
6935 new_stmt = gimple_build_bind (NULL, NULL, block);
6936 gsi_replace (gsi_p, new_stmt, true);
6937
6938 pop_gimplify_context (new_stmt);
6939 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6940 BLOCK_VARS (block) = gimple_bind_vars (bind);
6941 if (BLOCK_VARS (block))
6942 TREE_USED (block) = 1;
6943
6944 new_body = NULL;
6945 gimple_seq_add_seq (&new_body, ilist);
6946 gimple_seq_add_stmt (&new_body, stmt);
6947 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
6948 gimple_seq_add_stmt (&new_body, bind);
6949
6950 control = create_tmp_var (unsigned_type_node, ".section");
6951 t = gimple_build_omp_continue (control, control);
6952 gimple_omp_sections_set_control (stmt, control);
6953 gimple_seq_add_stmt (&new_body, t);
6954
6955 gimple_seq_add_seq (&new_body, olist);
6956 gimple_seq_add_seq (&new_body, dlist);
6957
6958 new_body = maybe_catch_exception (new_body);
6959
6960 t = gimple_build_omp_return
6961 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
6962 OMP_CLAUSE_NOWAIT));
6963 gimple_seq_add_stmt (&new_body, t);
6964
6965 gimple_bind_set_body (new_stmt, new_body);
6966 }
6967
6968
6969 /* A subroutine of lower_omp_single. Expand the simple form of
6970 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
6971
6972 if (GOMP_single_start ())
6973 BODY;
6974 [ GOMP_barrier (); ] -> unless 'nowait' is present.
6975
6976 FIXME. It may be better to delay expanding the logic of this until
6977 pass_expand_omp. The expanded logic may make the job more difficult
6978 for a synchronization analysis pass. */
6979
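/* Roughly (a sketch of the emitted sequence, with .t standing for a
   compiler temporary):

     .t = GOMP_single_start ();
     if (.t == 1) goto tlabel; else goto flabel;
   tlabel:
     BODY;
   flabel:;  */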
6980 static void
6981 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
6982 {
6983 location_t loc = gimple_location (single_stmt);
6984 tree tlabel = create_artificial_label (loc);
6985 tree flabel = create_artificial_label (loc);
6986 gimple call, cond;
6987 tree lhs, decl;
6988
6989 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
6990 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
6991 call = gimple_build_call (decl, 0);
6992 gimple_call_set_lhs (call, lhs);
6993 gimple_seq_add_stmt (pre_p, call);
6994
6995 cond = gimple_build_cond (EQ_EXPR, lhs,
6996 fold_convert_loc (loc, TREE_TYPE (lhs),
6997 boolean_true_node),
6998 tlabel, flabel);
6999 gimple_seq_add_stmt (pre_p, cond);
7000 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
7001 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
7002 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
7003 }
7004
7005
7006 /* A subroutine of lower_omp_single. Expand the simple form of
7007 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
7008
7009 #pragma omp single copyprivate (a, b, c)
7010
7011 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
7012
7013 {
7014 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
7015 {
7016 BODY;
7017 copyout.a = a;
7018 copyout.b = b;
7019 copyout.c = c;
7020 GOMP_single_copy_end (&copyout);
7021 }
7022 else
7023 {
7024 a = copyout_p->a;
7025 b = copyout_p->b;
7026 c = copyout_p->c;
7027 }
7028 GOMP_barrier ();
7029 }
7030
7031 FIXME. It may be better to delay expanding the logic of this until
7032 pass_expand_omp. The expanded logic may make the job more difficult
7033 for a synchronization analysis pass. */
7034
7035 static void
7036 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
7037 {
7038 tree ptr_type, t, l0, l1, l2, bfn_decl;
7039 gimple_seq copyin_seq;
7040 location_t loc = gimple_location (single_stmt);
7041
7042 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
7043
7044 ptr_type = build_pointer_type (ctx->record_type);
7045 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
7046
7047 l0 = create_artificial_label (loc);
7048 l1 = create_artificial_label (loc);
7049 l2 = create_artificial_label (loc);
7050
7051 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
7052 t = build_call_expr_loc (loc, bfn_decl, 0);
7053 t = fold_convert_loc (loc, ptr_type, t);
7054 gimplify_assign (ctx->receiver_decl, t, pre_p);
7055
7056 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
7057 build_int_cst (ptr_type, 0));
7058 t = build3 (COND_EXPR, void_type_node, t,
7059 build_and_jump (&l0), build_and_jump (&l1));
7060 gimplify_and_add (t, pre_p);
7061
7062 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
7063
7064 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
7065
7066 copyin_seq = NULL;
7067 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
7068 &copyin_seq, ctx);
7069
7070 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
7071 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
7072 t = build_call_expr_loc (loc, bfn_decl, 1, t);
7073 gimplify_and_add (t, pre_p);
7074
7075 t = build_and_jump (&l2);
7076 gimplify_and_add (t, pre_p);
7077
7078 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
7079
7080 gimple_seq_add_seq (pre_p, copyin_seq);
7081
7082 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
7083 }
7084
7085
7086 /* Lower code for an OpenMP single directive. */
7087
7088 static void
7089 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7090 {
7091 tree block;
7092 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
7093 gimple_seq bind_body, dlist;
7094 struct gimplify_ctx gctx;
7095
7096 push_gimplify_context (&gctx);
7097
7098 block = make_node (BLOCK);
7099 bind = gimple_build_bind (NULL, NULL, block);
7100 gsi_replace (gsi_p, bind, true);
7101 bind_body = NULL;
7102 dlist = NULL;
7103 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
7104 &bind_body, &dlist, ctx);
7105 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
7106
7107 gimple_seq_add_stmt (&bind_body, single_stmt);
7108
7109 if (ctx->record_type)
7110 lower_omp_single_copy (single_stmt, &bind_body, ctx);
7111 else
7112 lower_omp_single_simple (single_stmt, &bind_body);
7113
7114 gimple_omp_set_body (single_stmt, NULL);
7115
7116 gimple_seq_add_seq (&bind_body, dlist);
7117
7118 bind_body = maybe_catch_exception (bind_body);
7119
7120 t = gimple_build_omp_return
7121 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
7122 OMP_CLAUSE_NOWAIT));
7123 gimple_seq_add_stmt (&bind_body, t);
7124 gimple_bind_set_body (bind, bind_body);
7125
7126 pop_gimplify_context (bind);
7127
7128 gimple_bind_append_vars (bind, ctx->block_vars);
7129 BLOCK_VARS (block) = ctx->block_vars;
7130 if (BLOCK_VARS (block))
7131 TREE_USED (block) = 1;
7132 }
7133
7134
7135 /* Lower code for an OpenMP master directive. */
7136
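/* The lowering is essentially (a sketch):

     if (omp_get_thread_num () == 0)
       BODY;  */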
7137 static void
7138 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7139 {
7140 tree block, lab = NULL, x, bfn_decl;
7141 gimple stmt = gsi_stmt (*gsi_p), bind;
7142 location_t loc = gimple_location (stmt);
7143 gimple_seq tseq;
7144 struct gimplify_ctx gctx;
7145
7146 push_gimplify_context (&gctx);
7147
7148 block = make_node (BLOCK);
7149 bind = gimple_build_bind (NULL, NULL, block);
7150 gsi_replace (gsi_p, bind, true);
7151 gimple_bind_add_stmt (bind, stmt);
7152
7153 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
7154 x = build_call_expr_loc (loc, bfn_decl, 0);
7155 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
7156 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
7157 tseq = NULL;
7158 gimplify_and_add (x, &tseq);
7159 gimple_bind_add_seq (bind, tseq);
7160
7161 lower_omp (gimple_omp_body_ptr (stmt), ctx);
7162 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
7163 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
7164 gimple_omp_set_body (stmt, NULL);
7165
7166 gimple_bind_add_stmt (bind, gimple_build_label (lab));
7167
7168 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
7169
7170 pop_gimplify_context (bind);
7171
7172 gimple_bind_append_vars (bind, ctx->block_vars);
7173 BLOCK_VARS (block) = ctx->block_vars;
7174 }
7175
7176
7177 /* Lower code for an OpenMP ordered directive. */
7178
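/* The lowering brackets the body with runtime calls (a sketch):

     GOMP_ordered_start ();
     BODY;
     GOMP_ordered_end ();  */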
7179 static void
7180 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7181 {
7182 tree block;
7183 gimple stmt = gsi_stmt (*gsi_p), bind, x;
7184 struct gimplify_ctx gctx;
7185
7186 push_gimplify_context (&gctx);
7187
7188 block = make_node (BLOCK);
7189 bind = gimple_build_bind (NULL, NULL, block);
7190 gsi_replace (gsi_p, bind, true);
7191 gimple_bind_add_stmt (bind, stmt);
7192
7193 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
7194 0);
7195 gimple_bind_add_stmt (bind, x);
7196
7197 lower_omp (gimple_omp_body_ptr (stmt), ctx);
7198 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
7199 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
7200 gimple_omp_set_body (stmt, NULL);
7201
7202 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
7203 gimple_bind_add_stmt (bind, x);
7204
7205 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
7206
7207 pop_gimplify_context (bind);
7208
7209 gimple_bind_append_vars (bind, ctx->block_vars);
7210 BLOCK_VARS (block) = gimple_bind_vars (bind);
7211 }
7212
7213
7214 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
7215 substitution of a couple of function calls. But in the NAMED case, it
7216 requires that languages coordinate a symbol name. It is therefore
7217 best put here in common code. */
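/* Editorial sketch, following the calls built below: an unnamed critical
   section becomes

	GOMP_critical_start ();
	body;
	GOMP_critical_end ();

   whereas  #pragma omp critical (foo)  locks through a common symbol
   .gomp_critical_user_foo, so every translation unit that mentions the
   same name agrees on the same mutex:

	GOMP_critical_name_start (&.gomp_critical_user_foo);
	body;
	GOMP_critical_name_end (&.gomp_critical_user_foo);  */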
7218
7219 static GTY((param1_is (tree), param2_is (tree)))
7220 splay_tree critical_name_mutexes;
7221
7222 static void
7223 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7224 {
7225 tree block;
7226 tree name, lock, unlock;
7227 gimple stmt = gsi_stmt (*gsi_p), bind;
7228 location_t loc = gimple_location (stmt);
7229 gimple_seq tbody;
7230 struct gimplify_ctx gctx;
7231
7232 name = gimple_omp_critical_name (stmt);
7233 if (name)
7234 {
7235 tree decl;
7236 splay_tree_node n;
7237
7238 if (!critical_name_mutexes)
7239 critical_name_mutexes
7240 = splay_tree_new_ggc (splay_tree_compare_pointers,
7241 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
7242 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
7243
7244 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
7245 if (n == NULL)
7246 {
7247 char *new_str;
7248
7249 decl = create_tmp_var_raw (ptr_type_node, NULL);
7250
7251 new_str = ACONCAT ((".gomp_critical_user_",
7252 IDENTIFIER_POINTER (name), NULL));
7253 DECL_NAME (decl) = get_identifier (new_str);
7254 TREE_PUBLIC (decl) = 1;
7255 TREE_STATIC (decl) = 1;
7256 DECL_COMMON (decl) = 1;
7257 DECL_ARTIFICIAL (decl) = 1;
7258 DECL_IGNORED_P (decl) = 1;
7259 varpool_finalize_decl (decl);
7260
7261 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
7262 (splay_tree_value) decl);
7263 }
7264 else
7265 decl = (tree) n->value;
7266
7267 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
7268 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
7269
7270 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
7271 unlock = build_call_expr_loc (loc, unlock, 1,
7272 build_fold_addr_expr_loc (loc, decl));
7273 }
7274 else
7275 {
7276 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
7277 lock = build_call_expr_loc (loc, lock, 0);
7278
7279 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
7280 unlock = build_call_expr_loc (loc, unlock, 0);
7281 }
7282
7283 push_gimplify_context (&gctx);
7284
7285 block = make_node (BLOCK);
7286 bind = gimple_build_bind (NULL, NULL, block);
7287 gsi_replace (gsi_p, bind, true);
7288 gimple_bind_add_stmt (bind, stmt);
7289
7290 tbody = gimple_bind_body (bind);
7291 gimplify_and_add (lock, &tbody);
7292 gimple_bind_set_body (bind, tbody);
7293
7294 lower_omp (gimple_omp_body_ptr (stmt), ctx);
7295 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
7296 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
7297 gimple_omp_set_body (stmt, NULL);
7298
7299 tbody = gimple_bind_body (bind);
7300 gimplify_and_add (unlock, &tbody);
7301 gimple_bind_set_body (bind, tbody);
7302
7303 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
7304
7305 pop_gimplify_context (bind);
7306 gimple_bind_append_vars (bind, ctx->block_vars);
7307 BLOCK_VARS (block) = gimple_bind_vars (bind);
7308 }
7309
7310
7311 /* A subroutine of lower_omp_for. Generate code to emit the predicate
7312 for a lastprivate clause. Given a loop control predicate of (V
7313 cond N2), we gate the clause on (!(V cond N2)). The lowered form
7314 is appended to *DLIST, iterator initialization is appended to
7315 *BODY_P. */
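/* Editorial example: for

	#pragma omp for lastprivate (x)
	for (v = n1; v < n2; v += step)
	  ...

   the copy-out code appended to *DLIST is gated roughly as

	if (v >= n2)		i.e. !(v < n2)
	  x = x_private;

   and *BODY_P receives "v = n1;" (or "v = 0;" when a strict equality test
   is usable), so a thread that executes no iterations cannot satisfy the
   predicate by accident.  */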
7316
7317 static void
7318 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
7319 gimple_seq *dlist, struct omp_context *ctx)
7320 {
7321 tree clauses, cond, vinit;
7322 enum tree_code cond_code;
7323 gimple_seq stmts;
7324
7325 cond_code = fd->loop.cond_code;
7326 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
7327
7328 /* When possible, use a strict equality expression. This can let VRP-style
7329 optimizations deduce the value and remove a copy. */
7330 if (host_integerp (fd->loop.step, 0))
7331 {
7332 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
7333 if (step == 1 || step == -1)
7334 cond_code = EQ_EXPR;
7335 }
7336
7337 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
7338
7339 clauses = gimple_omp_for_clauses (fd->for_stmt);
7340 stmts = NULL;
7341 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
7342 if (!gimple_seq_empty_p (stmts))
7343 {
7344 gimple_seq_add_seq (&stmts, *dlist);
7345 *dlist = stmts;
7346
7347 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
7348 vinit = fd->loop.n1;
7349 if (cond_code == EQ_EXPR
7350 && host_integerp (fd->loop.n2, 0)
7351 && ! integer_zerop (fd->loop.n2))
7352 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
7353 else
7354 vinit = unshare_expr (vinit);
7355
7356 /* Initialize the iterator variable, so that threads that don't execute
7357 any iterations don't execute the lastprivate clauses by accident. */
7358 gimplify_assign (fd->loop.v, vinit, body_p);
7359 }
7360 }
7361
7362
7363 /* Lower code for an OpenMP loop directive. */
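/* Editorial outline of the sequence assembled below, all wrapped in a new
   GIMPLE_BIND that replaces the statement at GSI_P:

	<lowered input clauses and pre-body; VAL1/VAL2/VAL3 forced into temps>
	GIMPLE_OMP_FOR
	<lowered loop body>
	GIMPLE_OMP_CONTINUE (v, v)
	<reduction code, then lastprivate/destructor code (dlist)>
	GIMPLE_OMP_RETURN (nowait if a nowait clause was present)

   pass_expand_omp later turns this into real control flow and calls into
   libgomp's scheduling routines.  */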
7364
7365 static void
7366 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7367 {
7368 tree *rhs_p, block;
7369 struct omp_for_data fd;
7370 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
7371 gimple_seq omp_for_body, body, dlist;
7372 size_t i;
7373 struct gimplify_ctx gctx;
7374
7375 push_gimplify_context (&gctx);
7376
7377 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
7378
7379 block = make_node (BLOCK);
7380 new_stmt = gimple_build_bind (NULL, NULL, block);
7381 /* Replace at gsi right away, so that 'stmt' is no longer a member
7382 of a sequence, as we're going to add it to a different
7383 one below. */
7384 gsi_replace (gsi_p, new_stmt, true);
7385
7386 /* Move declaration of temporaries in the loop body before we make
7387 it go away. */
7388 omp_for_body = gimple_omp_body (stmt);
7389 if (!gimple_seq_empty_p (omp_for_body)
7390 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
7391 {
7392 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
7393 gimple_bind_append_vars (new_stmt, vars);
7394 }
7395
7396 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
7397 dlist = NULL;
7398 body = NULL;
7399 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
7400 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
7401
7402 lower_omp (gimple_omp_body_ptr (stmt), ctx);
7403
7404 /* Lower the header expressions. At this point, we can assume that
7405 the header is of the form:
7406
7407 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
7408
7409 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
7410 using the .omp_data_s mapping, if needed. */
7411 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
7412 {
7413 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
7414 if (!is_gimple_min_invariant (*rhs_p))
7415 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
7416
7417 rhs_p = gimple_omp_for_final_ptr (stmt, i);
7418 if (!is_gimple_min_invariant (*rhs_p))
7419 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
7420
7421 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
7422 if (!is_gimple_min_invariant (*rhs_p))
7423 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
7424 }
7425
7426 /* Once lowered, extract the bounds and clauses. */
7427 extract_omp_for_data (stmt, &fd, NULL);
7428
7429 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
7430
7431 gimple_seq_add_stmt (&body, stmt);
7432 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
7433
7434 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
7435 fd.loop.v));
7436
7437 /* After the loop, add exit clauses. */
7438 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
7439 gimple_seq_add_seq (&body, dlist);
7440
7441 body = maybe_catch_exception (body);
7442
7443 /* Region exit marker goes at the end of the loop body. */
7444 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
7445
7446 pop_gimplify_context (new_stmt);
7447
7448 gimple_bind_append_vars (new_stmt, ctx->block_vars);
7449 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
7450 if (BLOCK_VARS (block))
7451 TREE_USED (block) = 1;
7452
7453 gimple_bind_set_body (new_stmt, body);
7454 gimple_omp_set_body (stmt, NULL);
7455 gimple_omp_for_set_pre_body (stmt, NULL);
7456 }
7457
7458 /* Callback for walk_stmts. Check if the current statement only contains
7459 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
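/* Editorial example: for

	#pragma omp parallel
	#pragma omp for
	for (...) ...

   the walk finds exactly one worksharing construct, *INFO ends up as 1,
   and lower_omp_taskreg marks the parallel as combined; any other
   statement, or a second worksharing construct, forces *INFO to -1.  */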
7460
7461 static tree
7462 check_combined_parallel (gimple_stmt_iterator *gsi_p,
7463 bool *handled_ops_p,
7464 struct walk_stmt_info *wi)
7465 {
7466 int *info = (int *) wi->info;
7467 gimple stmt = gsi_stmt (*gsi_p);
7468
7469 *handled_ops_p = true;
7470 switch (gimple_code (stmt))
7471 {
7472 WALK_SUBSTMTS;
7473
7474 case GIMPLE_OMP_FOR:
7475 case GIMPLE_OMP_SECTIONS:
7476 *info = *info == 0 ? 1 : -1;
7477 break;
7478 default:
7479 *info = -1;
7480 break;
7481 }
7482 return NULL;
7483 }
7484
7485 struct omp_taskcopy_context
7486 {
7487 /* This field must be at the beginning, as we do "inheritance": Some
7488 callback functions for tree-inline.c (e.g., omp_copy_decl)
7489 receive a copy_body_data pointer that is up-casted to an
7490 omp_context pointer. */
7491 copy_body_data cb;
7492 omp_context *ctx;
7493 };
7494
7495 static tree
7496 task_copyfn_copy_decl (tree var, copy_body_data *cb)
7497 {
7498 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
7499
7500 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
7501 return create_tmp_var (TREE_TYPE (var), NULL);
7502
7503 return var;
7504 }
7505
7506 static tree
7507 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
7508 {
7509 tree name, new_fields = NULL, type, f;
7510
7511 type = lang_hooks.types.make_type (RECORD_TYPE);
7512 name = DECL_NAME (TYPE_NAME (orig_type));
7513 name = build_decl (gimple_location (tcctx->ctx->stmt),
7514 TYPE_DECL, name, type);
7515 TYPE_NAME (type) = name;
7516
7517 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
7518 {
7519 tree new_f = copy_node (f);
7520 DECL_CONTEXT (new_f) = type;
7521 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
7522 TREE_CHAIN (new_f) = new_fields;
7523 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
7524 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
7525 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
7526 &tcctx->cb, NULL);
7527 new_fields = new_f;
7528 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
7529 }
7530 TYPE_FIELDS (type) = nreverse (new_fields);
7531 layout_type (type);
7532 return type;
7533 }
7534
7535 /* Create task copyfn. */
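/* Editorial sketch: the copy function takes two pointer arguments, the
   destination task data block (of record_type, ARG below) and the sender
   block built by the encountering thread (of srecord_type, SARG below),
   and fills the former from the latter, roughly

	dst->a = src->a;			(shared: copy the pointer)
	dst->b = copy-constructed from src->b;	(firstprivate)

   with variable-sized firstprivate vars handled in a final pass, once the
   temporaries describing their sizes have been initialized.  */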
7536
7537 static void
7538 create_task_copyfn (gimple task_stmt, omp_context *ctx)
7539 {
7540 struct function *child_cfun;
7541 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
7542 tree record_type, srecord_type, bind, list;
7543 bool record_needs_remap = false, srecord_needs_remap = false;
7544 splay_tree_node n;
7545 struct omp_taskcopy_context tcctx;
7546 struct gimplify_ctx gctx;
7547 location_t loc = gimple_location (task_stmt);
7548
7549 child_fn = gimple_omp_task_copy_fn (task_stmt);
7550 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
7551 gcc_assert (child_cfun->cfg == NULL);
7552 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
7553
7554 /* Reset DECL_CONTEXT on function arguments. */
7555 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
7556 DECL_CONTEXT (t) = child_fn;
7557
7558 /* Populate the function. */
7559 push_gimplify_context (&gctx);
7560 push_cfun (child_cfun);
7561
7562 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
7563 TREE_SIDE_EFFECTS (bind) = 1;
7564 list = NULL;
7565 DECL_SAVED_TREE (child_fn) = bind;
7566 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
7567
7568 /* Remap src and dst argument types if needed. */
7569 record_type = ctx->record_type;
7570 srecord_type = ctx->srecord_type;
7571 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
7572 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
7573 {
7574 record_needs_remap = true;
7575 break;
7576 }
7577 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
7578 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
7579 {
7580 srecord_needs_remap = true;
7581 break;
7582 }
7583
7584 if (record_needs_remap || srecord_needs_remap)
7585 {
7586 memset (&tcctx, '\0', sizeof (tcctx));
7587 tcctx.cb.src_fn = ctx->cb.src_fn;
7588 tcctx.cb.dst_fn = child_fn;
7589 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
7590 gcc_checking_assert (tcctx.cb.src_node);
7591 tcctx.cb.dst_node = tcctx.cb.src_node;
7592 tcctx.cb.src_cfun = ctx->cb.src_cfun;
7593 tcctx.cb.copy_decl = task_copyfn_copy_decl;
7594 tcctx.cb.eh_lp_nr = 0;
7595 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
7596 tcctx.cb.decl_map = pointer_map_create ();
7597 tcctx.ctx = ctx;
7598
7599 if (record_needs_remap)
7600 record_type = task_copyfn_remap_type (&tcctx, record_type);
7601 if (srecord_needs_remap)
7602 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
7603 }
7604 else
7605 tcctx.cb.decl_map = NULL;
7606
7607 arg = DECL_ARGUMENTS (child_fn);
7608 TREE_TYPE (arg) = build_pointer_type (record_type);
7609 sarg = DECL_CHAIN (arg);
7610 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
7611
7612 /* First pass: initialize temporaries used in record_type and srecord_type
7613 sizes and field offsets. */
7614 if (tcctx.cb.decl_map)
7615 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
7616 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
7617 {
7618 tree *p;
7619
7620 decl = OMP_CLAUSE_DECL (c);
7621 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
7622 if (p == NULL)
7623 continue;
7624 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
7625 sf = (tree) n->value;
7626 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
7627 src = build_simple_mem_ref_loc (loc, sarg);
7628 src = omp_build_component_ref (src, sf);
7629 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
7630 append_to_statement_list (t, &list);
7631 }
7632
7633 /* Second pass: copy shared var pointers and copy-construct non-VLA
7634 firstprivate vars. */
7635 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
7636 switch (OMP_CLAUSE_CODE (c))
7637 {
7638 case OMP_CLAUSE_SHARED:
7639 decl = OMP_CLAUSE_DECL (c);
7640 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
7641 if (n == NULL)
7642 break;
7643 f = (tree) n->value;
7644 if (tcctx.cb.decl_map)
7645 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
7646 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
7647 sf = (tree) n->value;
7648 if (tcctx.cb.decl_map)
7649 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
7650 src = build_simple_mem_ref_loc (loc, sarg);
7651 src = omp_build_component_ref (src, sf);
7652 dst = build_simple_mem_ref_loc (loc, arg);
7653 dst = omp_build_component_ref (dst, f);
7654 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
7655 append_to_statement_list (t, &list);
7656 break;
7657 case OMP_CLAUSE_FIRSTPRIVATE:
7658 decl = OMP_CLAUSE_DECL (c);
7659 if (is_variable_sized (decl))
7660 break;
7661 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
7662 if (n == NULL)
7663 break;
7664 f = (tree) n->value;
7665 if (tcctx.cb.decl_map)
7666 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
7667 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
7668 if (n != NULL)
7669 {
7670 sf = (tree) n->value;
7671 if (tcctx.cb.decl_map)
7672 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
7673 src = build_simple_mem_ref_loc (loc, sarg);
7674 src = omp_build_component_ref (src, sf);
7675 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
7676 src = build_simple_mem_ref_loc (loc, src);
7677 }
7678 else
7679 src = decl;
7680 dst = build_simple_mem_ref_loc (loc, arg);
7681 dst = omp_build_component_ref (dst, f);
7682 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
7683 append_to_statement_list (t, &list);
7684 break;
7685 case OMP_CLAUSE_PRIVATE:
7686 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
7687 break;
7688 decl = OMP_CLAUSE_DECL (c);
7689 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
7690 f = (tree) n->value;
7691 if (tcctx.cb.decl_map)
7692 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
7693 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
7694 if (n != NULL)
7695 {
7696 sf = (tree) n->value;
7697 if (tcctx.cb.decl_map)
7698 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
7699 src = build_simple_mem_ref_loc (loc, sarg);
7700 src = omp_build_component_ref (src, sf);
7701 if (use_pointer_for_field (decl, NULL))
7702 src = build_simple_mem_ref_loc (loc, src);
7703 }
7704 else
7705 src = decl;
7706 dst = build_simple_mem_ref_loc (loc, arg);
7707 dst = omp_build_component_ref (dst, f);
7708 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
7709 append_to_statement_list (t, &list);
7710 break;
7711 default:
7712 break;
7713 }
7714
7715 /* Last pass: handle VLA firstprivates. */
7716 if (tcctx.cb.decl_map)
7717 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
7718 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
7719 {
7720 tree ind, ptr, df;
7721
7722 decl = OMP_CLAUSE_DECL (c);
7723 if (!is_variable_sized (decl))
7724 continue;
7725 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
7726 if (n == NULL)
7727 continue;
7728 f = (tree) n->value;
7729 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
7730 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
7731 ind = DECL_VALUE_EXPR (decl);
7732 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
7733 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
7734 n = splay_tree_lookup (ctx->sfield_map,
7735 (splay_tree_key) TREE_OPERAND (ind, 0));
7736 sf = (tree) n->value;
7737 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
7738 src = build_simple_mem_ref_loc (loc, sarg);
7739 src = omp_build_component_ref (src, sf);
7740 src = build_simple_mem_ref_loc (loc, src);
7741 dst = build_simple_mem_ref_loc (loc, arg);
7742 dst = omp_build_component_ref (dst, f);
7743 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
7744 append_to_statement_list (t, &list);
7745 n = splay_tree_lookup (ctx->field_map,
7746 (splay_tree_key) TREE_OPERAND (ind, 0));
7747 df = (tree) n->value;
7748 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
7749 ptr = build_simple_mem_ref_loc (loc, arg);
7750 ptr = omp_build_component_ref (ptr, df);
7751 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
7752 build_fold_addr_expr_loc (loc, dst));
7753 append_to_statement_list (t, &list);
7754 }
7755
7756 t = build1 (RETURN_EXPR, void_type_node, NULL);
7757 append_to_statement_list (t, &list);
7758
7759 if (tcctx.cb.decl_map)
7760 pointer_map_destroy (tcctx.cb.decl_map);
7761 pop_gimplify_context (NULL);
7762 BIND_EXPR_BODY (bind) = list;
7763 pop_cfun ();
7764 }
7765
7766 /* Lower the OpenMP parallel or task directive in the current statement
7767 in GSI_P. CTX holds context information for the directive. */
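/* Editorial sketch of the result: the statement at GSI_P is replaced by a
   GIMPLE_BIND containing

	<ilist: marshal clause values and shared vars into .omp_data_o>
	GIMPLE_OMP_PARALLEL / GIMPLE_OMP_TASK
	  receiver_decl = &.omp_data_o;
	  <lowered input clauses>
	  <lowered body>
	  <reductions and other copy-back code>
	  GIMPLE_OMP_RETURN
	<olist: copy results back out of .omp_data_o>

   The child function is only split out later, by pass_expand_omp.  */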
7768
7769 static void
7770 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7771 {
7772 tree clauses;
7773 tree child_fn, t;
7774 gimple stmt = gsi_stmt (*gsi_p);
7775 gimple par_bind, bind;
7776 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
7777 struct gimplify_ctx gctx;
7778 location_t loc = gimple_location (stmt);
7779
7780 clauses = gimple_omp_taskreg_clauses (stmt);
7781 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
7782 par_body = gimple_bind_body (par_bind);
7783 child_fn = ctx->cb.dst_fn;
7784 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
7785 && !gimple_omp_parallel_combined_p (stmt))
7786 {
7787 struct walk_stmt_info wi;
7788 int ws_num = 0;
7789
7790 memset (&wi, 0, sizeof (wi));
7791 wi.info = &ws_num;
7792 wi.val_only = true;
7793 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
7794 if (ws_num == 1)
7795 gimple_omp_parallel_set_combined_p (stmt, true);
7796 }
7797 if (ctx->srecord_type)
7798 create_task_copyfn (stmt, ctx);
7799
7800 push_gimplify_context (&gctx);
7801
7802 par_olist = NULL;
7803 par_ilist = NULL;
7804 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
7805 lower_omp (&par_body, ctx);
7806 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
7807 lower_reduction_clauses (clauses, &par_olist, ctx);
7808
7809 /* Declare all the variables created by mapping and the variables
7810 declared in the scope of the parallel body. */
7811 record_vars_into (ctx->block_vars, child_fn);
7812 record_vars_into (gimple_bind_vars (par_bind), child_fn);
7813
7814 if (ctx->record_type)
7815 {
7816 ctx->sender_decl
7817 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
7818 : ctx->record_type, ".omp_data_o");
7819 DECL_NAMELESS (ctx->sender_decl) = 1;
7820 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
7821 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
7822 }
7823
7824 olist = NULL;
7825 ilist = NULL;
7826 lower_send_clauses (clauses, &ilist, &olist, ctx);
7827 lower_send_shared_vars (&ilist, &olist, ctx);
7828
7829 /* Once all the expansions are done, sequence all the different
7830 fragments inside gimple_omp_body. */
7831
7832 new_body = NULL;
7833
7834 if (ctx->record_type)
7835 {
7836 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
7837 /* fixup_child_record_type might have changed receiver_decl's type. */
7838 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
7839 gimple_seq_add_stmt (&new_body,
7840 gimple_build_assign (ctx->receiver_decl, t));
7841 }
7842
7843 gimple_seq_add_seq (&new_body, par_ilist);
7844 gimple_seq_add_seq (&new_body, par_body);
7845 gimple_seq_add_seq (&new_body, par_olist);
7846 new_body = maybe_catch_exception (new_body);
7847 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
7848 gimple_omp_set_body (stmt, new_body);
7849
7850 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
7851 gsi_replace (gsi_p, bind, true);
7852 gimple_bind_add_seq (bind, ilist);
7853 gimple_bind_add_stmt (bind, stmt);
7854 gimple_bind_add_seq (bind, olist);
7855
7856 pop_gimplify_context (NULL);
7857 }
7858
7859 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
7860 regimplified. If DATA is non-NULL, lower_omp_1 is outside
7861 of OpenMP context, but with task_shared_vars set. */
7862
7863 static tree
7864 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
7865 void *data)
7866 {
7867 tree t = *tp;
7868
7869 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
7870 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
7871 return t;
7872
7873 if (task_shared_vars
7874 && DECL_P (t)
7875 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
7876 return t;
7877
7878 /* If a global variable has been privatized, TREE_CONSTANT on
7879 ADDR_EXPR might be wrong. */
7880 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
7881 recompute_tree_invariant_for_addr_expr (t);
7882
7883 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
7884 return NULL_TREE;
7885 }
7886
7887 static void
7888 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7889 {
7890 gimple stmt = gsi_stmt (*gsi_p);
7891 struct walk_stmt_info wi;
7892
7893 if (gimple_has_location (stmt))
7894 input_location = gimple_location (stmt);
7895
7896 if (task_shared_vars)
7897 memset (&wi, '\0', sizeof (wi));
7898
7899 /* If we have issued syntax errors, avoid doing any heavy lifting.
7900 Just replace the OpenMP directives with a NOP to avoid
7901 confusing RTL expansion. */
7902 if (seen_error () && is_gimple_omp (stmt))
7903 {
7904 gsi_replace (gsi_p, gimple_build_nop (), true);
7905 return;
7906 }
7907
7908 switch (gimple_code (stmt))
7909 {
7910 case GIMPLE_COND:
7911 if ((ctx || task_shared_vars)
7912 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
7913 ctx ? NULL : &wi, NULL)
7914 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
7915 ctx ? NULL : &wi, NULL)))
7916 gimple_regimplify_operands (stmt, gsi_p);
7917 break;
7918 case GIMPLE_CATCH:
7919 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
7920 break;
7921 case GIMPLE_EH_FILTER:
7922 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
7923 break;
7924 case GIMPLE_TRY:
7925 lower_omp (gimple_try_eval_ptr (stmt), ctx);
7926 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
7927 break;
7928 case GIMPLE_TRANSACTION:
7929 lower_omp (gimple_transaction_body_ptr (stmt), ctx);
7930 break;
7931 case GIMPLE_BIND:
7932 lower_omp (gimple_bind_body_ptr (stmt), ctx);
7933 break;
7934 case GIMPLE_OMP_PARALLEL:
7935 case GIMPLE_OMP_TASK:
7936 ctx = maybe_lookup_ctx (stmt);
7937 lower_omp_taskreg (gsi_p, ctx);
7938 break;
7939 case GIMPLE_OMP_FOR:
7940 ctx = maybe_lookup_ctx (stmt);
7941 gcc_assert (ctx);
7942 lower_omp_for (gsi_p, ctx);
7943 break;
7944 case GIMPLE_OMP_SECTIONS:
7945 ctx = maybe_lookup_ctx (stmt);
7946 gcc_assert (ctx);
7947 lower_omp_sections (gsi_p, ctx);
7948 break;
7949 case GIMPLE_OMP_SINGLE:
7950 ctx = maybe_lookup_ctx (stmt);
7951 gcc_assert (ctx);
7952 lower_omp_single (gsi_p, ctx);
7953 break;
7954 case GIMPLE_OMP_MASTER:
7955 ctx = maybe_lookup_ctx (stmt);
7956 gcc_assert (ctx);
7957 lower_omp_master (gsi_p, ctx);
7958 break;
7959 case GIMPLE_OMP_ORDERED:
7960 ctx = maybe_lookup_ctx (stmt);
7961 gcc_assert (ctx);
7962 lower_omp_ordered (gsi_p, ctx);
7963 break;
7964 case GIMPLE_OMP_CRITICAL:
7965 ctx = maybe_lookup_ctx (stmt);
7966 gcc_assert (ctx);
7967 lower_omp_critical (gsi_p, ctx);
7968 break;
7969 case GIMPLE_OMP_ATOMIC_LOAD:
7970 if ((ctx || task_shared_vars)
7971 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
7972 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
7973 gimple_regimplify_operands (stmt, gsi_p);
7974 break;
7975 default:
7976 if ((ctx || task_shared_vars)
7977 && walk_gimple_op (stmt, lower_omp_regimplify_p,
7978 ctx ? NULL : &wi))
7979 gimple_regimplify_operands (stmt, gsi_p);
7980 break;
7981 }
7982 }
7983
7984 static void
7985 lower_omp (gimple_seq *body, omp_context *ctx)
7986 {
7987 location_t saved_location = input_location;
7988 gimple_stmt_iterator gsi;
7989 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
7990 lower_omp_1 (&gsi, ctx);
7991 input_location = saved_location;
7992 }
7993 \f
7994 /* Main entry point. */
7995
7996 static unsigned int
7997 execute_lower_omp (void)
7998 {
7999 gimple_seq body;
8000
8001 /* This pass always runs, to provide PROP_gimple_lomp.
8002 But there is nothing to do unless -fopenmp is given. */
8003 if (flag_openmp == 0)
8004 return 0;
8005
8006 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
8007 delete_omp_context);
8008
8009 body = gimple_body (current_function_decl);
8010 scan_omp (&body, NULL);
8011 gcc_assert (taskreg_nesting_level == 0);
8012
8013 if (all_contexts->root)
8014 {
8015 struct gimplify_ctx gctx;
8016
8017 if (task_shared_vars)
8018 push_gimplify_context (&gctx);
8019 lower_omp (&body, NULL);
8020 if (task_shared_vars)
8021 pop_gimplify_context (NULL);
8022 }
8023
8024 if (all_contexts)
8025 {
8026 splay_tree_delete (all_contexts);
8027 all_contexts = NULL;
8028 }
8029 BITMAP_FREE (task_shared_vars);
8030 return 0;
8031 }
8032
8033 namespace {
8034
8035 const pass_data pass_data_lower_omp =
8036 {
8037 GIMPLE_PASS, /* type */
8038 "omplower", /* name */
8039 OPTGROUP_NONE, /* optinfo_flags */
8040 false, /* has_gate */
8041 true, /* has_execute */
8042 TV_NONE, /* tv_id */
8043 PROP_gimple_any, /* properties_required */
8044 PROP_gimple_lomp, /* properties_provided */
8045 0, /* properties_destroyed */
8046 0, /* todo_flags_start */
8047 0, /* todo_flags_finish */
8048 };
8049
8050 class pass_lower_omp : public gimple_opt_pass
8051 {
8052 public:
8053 pass_lower_omp(gcc::context *ctxt)
8054 : gimple_opt_pass(pass_data_lower_omp, ctxt)
8055 {}
8056
8057 /* opt_pass methods: */
8058 unsigned int execute () { return execute_lower_omp (); }
8059
8060 }; // class pass_lower_omp
8061
8062 } // anon namespace
8063
8064 gimple_opt_pass *
8065 make_pass_lower_omp (gcc::context *ctxt)
8066 {
8067 return new pass_lower_omp (ctxt);
8068 }
8069 \f
8070 /* The following is a utility to diagnose OpenMP structured block violations.
8071 It is not part of the "omplower" pass, as that's invoked too late. It
8072 should be invoked by the respective front ends after gimplification. */
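/* Editorial example of what gets rejected: a branch into or out of a
   structured block, e.g.

	#pragma omp parallel
	  {
	    goto out;		invalid exit from the structured block
	  }
      out:;

   diagnose_sb_1 records, for every label, the innermost OpenMP construct
   containing it; diagnose_sb_2 then compares that context against the
   context of every branch (goto, cond, switch, return).  */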
8073
8074 static splay_tree all_labels;
8075
8076 /* Check for mismatched contexts and generate an error if needed. Return
8077 true if an error is detected. */
8078
8079 static bool
8080 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
8081 gimple branch_ctx, gimple label_ctx)
8082 {
8083 if (label_ctx == branch_ctx)
8084 return false;
8085
8086
8087 /*
8088 Previously we kept track of the label's entire context in diagnose_sb_[12]
8089 so we could traverse it and issue a correct "exit" or "enter" error
8090 message upon a structured block violation.
8091
8092 We built the context by chaining entries onto a list with tree_cons, but there is
8093 no easy counterpart in gimple tuples. It seems like far too much work
8094 for issuing exit/enter error messages. If someone really misses the
8095 distinct error message... patches welcome.
8096 */
8097
8098 #if 0
8099 /* Try to avoid confusing the user by producing an error message
8100 with correct "exit" or "enter" verbiage. We prefer "exit"
8101 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
8102 if (branch_ctx == NULL)
8103 exit_p = false;
8104 else
8105 {
8106 while (label_ctx)
8107 {
8108 if (TREE_VALUE (label_ctx) == branch_ctx)
8109 {
8110 exit_p = false;
8111 break;
8112 }
8113 label_ctx = TREE_CHAIN (label_ctx);
8114 }
8115 }
8116
8117 if (exit_p)
8118 error ("invalid exit from OpenMP structured block");
8119 else
8120 error ("invalid entry to OpenMP structured block");
8121 #endif
8122
8123 /* If it's obvious we have an invalid entry, be specific about the error. */
8124 if (branch_ctx == NULL)
8125 error ("invalid entry to OpenMP structured block");
8126 else
8127 /* Otherwise, be vague and lazy, but efficient. */
8128 error ("invalid branch to/from an OpenMP structured block");
8129
8130 gsi_replace (gsi_p, gimple_build_nop (), false);
8131 return true;
8132 }
8133
8134 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
8135 where each label is found. */
8136
8137 static tree
8138 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
8139 struct walk_stmt_info *wi)
8140 {
8141 gimple context = (gimple) wi->info;
8142 gimple inner_context;
8143 gimple stmt = gsi_stmt (*gsi_p);
8144
8145 *handled_ops_p = true;
8146
8147 switch (gimple_code (stmt))
8148 {
8149 WALK_SUBSTMTS;
8150
8151 case GIMPLE_OMP_PARALLEL:
8152 case GIMPLE_OMP_TASK:
8153 case GIMPLE_OMP_SECTIONS:
8154 case GIMPLE_OMP_SINGLE:
8155 case GIMPLE_OMP_SECTION:
8156 case GIMPLE_OMP_MASTER:
8157 case GIMPLE_OMP_ORDERED:
8158 case GIMPLE_OMP_CRITICAL:
8159 /* The minimal context here is just the current OMP construct. */
8160 inner_context = stmt;
8161 wi->info = inner_context;
8162 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
8163 wi->info = context;
8164 break;
8165
8166 case GIMPLE_OMP_FOR:
8167 inner_context = stmt;
8168 wi->info = inner_context;
8169 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
8170 walk them. */
8171 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
8172 diagnose_sb_1, NULL, wi);
8173 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
8174 wi->info = context;
8175 break;
8176
8177 case GIMPLE_LABEL:
8178 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
8179 (splay_tree_value) context);
8180 break;
8181
8182 default:
8183 break;
8184 }
8185
8186 return NULL_TREE;
8187 }
8188
8189 /* Pass 2: Check each branch and see if its context differs from that of
8190 the destination label's context. */
8191
8192 static tree
8193 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
8194 struct walk_stmt_info *wi)
8195 {
8196 gimple context = (gimple) wi->info;
8197 splay_tree_node n;
8198 gimple stmt = gsi_stmt (*gsi_p);
8199
8200 *handled_ops_p = true;
8201
8202 switch (gimple_code (stmt))
8203 {
8204 WALK_SUBSTMTS;
8205
8206 case GIMPLE_OMP_PARALLEL:
8207 case GIMPLE_OMP_TASK:
8208 case GIMPLE_OMP_SECTIONS:
8209 case GIMPLE_OMP_SINGLE:
8210 case GIMPLE_OMP_SECTION:
8211 case GIMPLE_OMP_MASTER:
8212 case GIMPLE_OMP_ORDERED:
8213 case GIMPLE_OMP_CRITICAL:
8214 wi->info = stmt;
8215 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
8216 wi->info = context;
8217 break;
8218
8219 case GIMPLE_OMP_FOR:
8220 wi->info = stmt;
8221 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
8222 walk them. */
8223 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
8224 diagnose_sb_2, NULL, wi);
8225 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
8226 wi->info = context;
8227 break;
8228
8229 case GIMPLE_COND:
8230 {
8231 tree lab = gimple_cond_true_label (stmt);
8232 if (lab)
8233 {
8234 n = splay_tree_lookup (all_labels,
8235 (splay_tree_key) lab);
8236 diagnose_sb_0 (gsi_p, context,
8237 n ? (gimple) n->value : NULL);
8238 }
8239 lab = gimple_cond_false_label (stmt);
8240 if (lab)
8241 {
8242 n = splay_tree_lookup (all_labels,
8243 (splay_tree_key) lab);
8244 diagnose_sb_0 (gsi_p, context,
8245 n ? (gimple) n->value : NULL);
8246 }
8247 }
8248 break;
8249
8250 case GIMPLE_GOTO:
8251 {
8252 tree lab = gimple_goto_dest (stmt);
8253 if (TREE_CODE (lab) != LABEL_DECL)
8254 break;
8255
8256 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
8257 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
8258 }
8259 break;
8260
8261 case GIMPLE_SWITCH:
8262 {
8263 unsigned int i;
8264 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
8265 {
8266 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
8267 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
8268 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
8269 break;
8270 }
8271 }
8272 break;
8273
8274 case GIMPLE_RETURN:
8275 diagnose_sb_0 (gsi_p, context, NULL);
8276 break;
8277
8278 default:
8279 break;
8280 }
8281
8282 return NULL_TREE;
8283 }
8284
8285 static unsigned int
8286 diagnose_omp_structured_block_errors (void)
8287 {
8288 struct walk_stmt_info wi;
8289 gimple_seq body = gimple_body (current_function_decl);
8290
8291 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
8292
8293 memset (&wi, 0, sizeof (wi));
8294 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
8295
8296 memset (&wi, 0, sizeof (wi));
8297 wi.want_locations = true;
8298 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
8299
8300 gimple_set_body (current_function_decl, body);
8301
8302 splay_tree_delete (all_labels);
8303 all_labels = NULL;
8304
8305 return 0;
8306 }
8307
8308 static bool
8309 gate_diagnose_omp_blocks (void)
8310 {
8311 return flag_openmp != 0;
8312 }
8313
8314 namespace {
8315
8316 const pass_data pass_data_diagnose_omp_blocks =
8317 {
8318 GIMPLE_PASS, /* type */
8319 "*diagnose_omp_blocks", /* name */
8320 OPTGROUP_NONE, /* optinfo_flags */
8321 true, /* has_gate */
8322 true, /* has_execute */
8323 TV_NONE, /* tv_id */
8324 PROP_gimple_any, /* properties_required */
8325 0, /* properties_provided */
8326 0, /* properties_destroyed */
8327 0, /* todo_flags_start */
8328 0, /* todo_flags_finish */
8329 };
8330
8331 class pass_diagnose_omp_blocks : public gimple_opt_pass
8332 {
8333 public:
8334 pass_diagnose_omp_blocks(gcc::context *ctxt)
8335 : gimple_opt_pass(pass_data_diagnose_omp_blocks, ctxt)
8336 {}
8337
8338 /* opt_pass methods: */
8339 bool gate () { return gate_diagnose_omp_blocks (); }
8340 unsigned int execute () {
8341 return diagnose_omp_structured_block_errors ();
8342 }
8343
8344 }; // class pass_diagnose_omp_blocks
8345
8346 } // anon namespace
8347
8348 gimple_opt_pass *
8349 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
8350 {
8351 return new pass_diagnose_omp_blocks (ctxt);
8352 }
8353
8354 #include "gt-omp-low.h"