gcc/omp-low.c
1 /* Lowering pass for OpenMP directives. Converts OpenMP directives
2 into explicit calls to the runtime library (libgomp) and data
3 marshalling to implement data sharing and copying clauses.
4 Contributed by Diego Novillo <dnovillo@redhat.com>
5
6 Copyright (C) 2005-2013 Free Software Foundation, Inc.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "tm.h"
28 #include "tree.h"
29 #include "rtl.h"
30 #include "gimple.h"
31 #include "tree-iterator.h"
32 #include "tree-inline.h"
33 #include "langhooks.h"
34 #include "diagnostic-core.h"
35 #include "tree-ssa.h"
36 #include "flags.h"
37 #include "function.h"
38 #include "expr.h"
39 #include "tree-pass.h"
40 #include "ggc.h"
41 #include "except.h"
42 #include "splay-tree.h"
43 #include "optabs.h"
44 #include "cfgloop.h"
45 #include "target.h"
46
47
48 /* Lowering of OpenMP parallel and workshare constructs proceeds in two
49 phases. The first phase scans the function looking for OMP statements
50 and then for variables that must be replaced to satisfy data sharing
51 clauses. The second phase expands code for the constructs, as well as
52 re-gimplifying things when variables have been replaced with complex
53 expressions.
54
55 Final code generation is done by pass_expand_omp. The flowgraph is
56 scanned for parallel regions which are then moved to a new
57 function, to be invoked by the thread library. */
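
/* As a rough illustration (not a verbatim dump of this pass's output),
   a simple region such as

	#pragma omp parallel shared (x)
	  bar (x);

   inside a function foo is outlined into a child function that receives
   the shared data block, schematically

	void foo._omp_fn.0 (void *.omp_data_i)
	{
	  ... bar (.omp_data_i->x) ...
	}

   while the original region is replaced by marshalling of X into
   .omp_data_o followed by calls into libgomp (GOMP_parallel_start and
   GOMP_parallel_end, or the combined GOMP_parallel entry point in newer
   libgomp versions) that spawn the team and invoke the child.  The
   exact runtime entry points depend on the construct and its clauses.  */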
58
59 /* Context structure. Used to store information about each parallel
60 directive in the code. */
61
62 typedef struct omp_context
63 {
64 /* This field must be at the beginning, as we do "inheritance": Some
65 callback functions for tree-inline.c (e.g., omp_copy_decl)
66 receive a copy_body_data pointer that is up-casted to an
67 omp_context pointer. */
68 copy_body_data cb;
69
70 /* The tree of contexts corresponding to the encountered constructs. */
71 struct omp_context *outer;
72 gimple stmt;
73
74 /* Map variables to fields in a structure that allows communication
75 between sending and receiving threads. */
76 splay_tree field_map;
77 tree record_type;
78 tree sender_decl;
79 tree receiver_decl;
80
81   /* These are used just by task contexts, if the task firstprivate fn is
82      needed.  srecord_type is used to communicate from the thread
83      that encountered the task construct to the task firstprivate fn;
84      record_type is allocated by GOMP_task, initialized by the task
85      firstprivate fn and passed to the task body fn.  */
86 splay_tree sfield_map;
87 tree srecord_type;
88
89 /* A chain of variables to add to the top-level block surrounding the
90 construct. In the case of a parallel, this is in the child function. */
91 tree block_vars;
92
93 /* What to do with variables with implicitly determined sharing
94 attributes. */
95 enum omp_clause_default_kind default_kind;
96
97   /* Nesting depth of this context.  Used to beautify error messages about
98 invalid gotos. The outermost ctx is depth 1, with depth 0 being
99 reserved for the main body of the function. */
100 int depth;
101
102 /* True if this parallel directive is nested within another. */
103 bool is_nested;
104 } omp_context;
105
106
107 struct omp_for_data_loop
108 {
109 tree v, n1, n2, step;
110 enum tree_code cond_code;
111 };
112
113 /* A structure describing the main elements of a parallel loop. */
114
115 struct omp_for_data
116 {
117 struct omp_for_data_loop loop;
118 tree chunk_size;
119 gimple for_stmt;
120 tree pre, iter_type;
121 int collapse;
122 bool have_nowait, have_ordered;
123 enum omp_clause_schedule_kind sched_kind;
124 struct omp_for_data_loop *loops;
125 };
126
127
128 static splay_tree all_contexts;
129 static int taskreg_nesting_level;
130 struct omp_region *root_omp_region;
131 static bitmap task_shared_vars;
132
133 static void scan_omp (gimple_seq *, omp_context *);
134 static tree scan_omp_1_op (tree *, int *, void *);
135
136 #define WALK_SUBSTMTS \
137 case GIMPLE_BIND: \
138 case GIMPLE_TRY: \
139 case GIMPLE_CATCH: \
140 case GIMPLE_EH_FILTER: \
141 case GIMPLE_TRANSACTION: \
142 /* The sub-statements for these should be walked. */ \
143 *handled_ops_p = false; \
144 break;
145
146 /* Convenience function for calling scan_omp_1_op on tree operands. */
147
148 static inline tree
149 scan_omp_op (tree *tp, omp_context *ctx)
150 {
151 struct walk_stmt_info wi;
152
153 memset (&wi, 0, sizeof (wi));
154 wi.info = ctx;
155 wi.want_locations = true;
156
157 return walk_tree (tp, scan_omp_1_op, &wi, NULL);
158 }
159
160 static void lower_omp (gimple_seq *, omp_context *);
161 static tree lookup_decl_in_outer_ctx (tree, omp_context *);
162 static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
163
164 /* Find an OpenMP clause of type KIND within CLAUSES. */
165
166 tree
167 find_omp_clause (tree clauses, enum omp_clause_code kind)
168 {
169 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
170 if (OMP_CLAUSE_CODE (clauses) == kind)
171 return clauses;
172
173 return NULL_TREE;
174 }
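
/* For instance, a caller can test a GIMPLE_OMP_FOR for a nowait clause
   with something like (illustrative only):

	tree c = find_omp_clause (gimple_omp_for_clauses (for_stmt),
				  OMP_CLAUSE_NOWAIT);
	if (c != NULL_TREE)
	  ... the loop has a nowait clause ...

   Only the first matching clause is returned; callers that need every
   instance of a clause kind walk OMP_CLAUSE_CHAIN themselves.  */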
175
176 /* Return true if CTX is for an omp parallel. */
177
178 static inline bool
179 is_parallel_ctx (omp_context *ctx)
180 {
181 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
182 }
183
184
185 /* Return true if CTX is for an omp task. */
186
187 static inline bool
188 is_task_ctx (omp_context *ctx)
189 {
190 return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
191 }
192
193
194 /* Return true if CTX is for an omp parallel or omp task. */
195
196 static inline bool
197 is_taskreg_ctx (omp_context *ctx)
198 {
199 return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
200 || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
201 }
202
203
204 /* Return true if REGION is a combined parallel+workshare region. */
205
206 static inline bool
207 is_combined_parallel (struct omp_region *region)
208 {
209 return region->is_combined_parallel;
210 }
211
212
213 /* Extract the header elements of parallel loop FOR_STMT and store
214 them into *FD. */
215
216 static void
217 extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
218 struct omp_for_data_loop *loops)
219 {
220 tree t, var, *collapse_iter, *collapse_count;
221 tree count = NULL_TREE, iter_type = long_integer_type_node;
222 struct omp_for_data_loop *loop;
223 int i;
224 struct omp_for_data_loop dummy_loop;
225 location_t loc = gimple_location (for_stmt);
226 bool simd = gimple_omp_for_kind (for_stmt) == GF_OMP_FOR_KIND_SIMD;
227
228 fd->for_stmt = for_stmt;
229 fd->pre = NULL;
230 fd->collapse = gimple_omp_for_collapse (for_stmt);
231 if (fd->collapse > 1)
232 fd->loops = loops;
233 else
234 fd->loops = &fd->loop;
235
236 fd->have_nowait = fd->have_ordered = false;
237 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
238 fd->chunk_size = NULL_TREE;
239 collapse_iter = NULL;
240 collapse_count = NULL;
241
242 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
243 switch (OMP_CLAUSE_CODE (t))
244 {
245 case OMP_CLAUSE_NOWAIT:
246 fd->have_nowait = true;
247 break;
248 case OMP_CLAUSE_ORDERED:
249 fd->have_ordered = true;
250 break;
251 case OMP_CLAUSE_SCHEDULE:
252 fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
253 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
254 break;
255 case OMP_CLAUSE_COLLAPSE:
256 if (fd->collapse > 1)
257 {
258 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
259 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
260 }
261 default:
262 break;
263 }
264
265 /* FIXME: for now map schedule(auto) to schedule(static).
266 There should be analysis to determine whether all iterations
267 are approximately the same amount of work (then schedule(static)
268 is best) or if it varies (then schedule(dynamic,N) is better). */
269 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
270 {
271 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
272 gcc_assert (fd->chunk_size == NULL);
273 }
274 gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
275 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
276 gcc_assert (fd->chunk_size == NULL);
277 else if (fd->chunk_size == NULL)
278 {
279 /* We only need to compute a default chunk size for ordered
280 static loops and dynamic loops. */
281 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
282 || fd->have_ordered
283 || fd->collapse > 1)
284 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
285 ? integer_zero_node : integer_one_node;
286 }
287
288 for (i = 0; i < fd->collapse; i++)
289 {
290 if (fd->collapse == 1)
291 loop = &fd->loop;
292 else if (loops != NULL)
293 loop = loops + i;
294 else
295 loop = &dummy_loop;
296
297
298 loop->v = gimple_omp_for_index (for_stmt, i);
299 gcc_assert (SSA_VAR_P (loop->v));
300 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
301 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
302 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
303 loop->n1 = gimple_omp_for_initial (for_stmt, i);
304
305 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
306 loop->n2 = gimple_omp_for_final (for_stmt, i);
307 switch (loop->cond_code)
308 {
309 case LT_EXPR:
310 case GT_EXPR:
311 break;
312 case LE_EXPR:
313 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
314 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
315 else
316 loop->n2 = fold_build2_loc (loc,
317 PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
318 build_int_cst (TREE_TYPE (loop->n2), 1));
319 loop->cond_code = LT_EXPR;
320 break;
321 case GE_EXPR:
322 if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
323 loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
324 else
325 loop->n2 = fold_build2_loc (loc,
326 MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
327 build_int_cst (TREE_TYPE (loop->n2), 1));
328 loop->cond_code = GT_EXPR;
329 break;
330 default:
331 gcc_unreachable ();
332 }
333
334 t = gimple_omp_for_incr (for_stmt, i);
335 gcc_assert (TREE_OPERAND (t, 0) == var);
336 switch (TREE_CODE (t))
337 {
338 case PLUS_EXPR:
339 loop->step = TREE_OPERAND (t, 1);
340 break;
341 case POINTER_PLUS_EXPR:
342 loop->step = fold_convert (ssizetype, TREE_OPERAND (t, 1));
343 break;
344 case MINUS_EXPR:
345 loop->step = TREE_OPERAND (t, 1);
346 loop->step = fold_build1_loc (loc,
347 NEGATE_EXPR, TREE_TYPE (loop->step),
348 loop->step);
349 break;
350 default:
351 gcc_unreachable ();
352 }
353
354 if (simd)
355 {
356 if (fd->collapse == 1)
357 iter_type = TREE_TYPE (loop->v);
358 else if (i == 0
359 || TYPE_PRECISION (iter_type)
360 < TYPE_PRECISION (TREE_TYPE (loop->v)))
361 iter_type
362 = build_nonstandard_integer_type
363 (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
364 }
365 else if (iter_type != long_long_unsigned_type_node)
366 {
367 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
368 iter_type = long_long_unsigned_type_node;
369 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
370 && TYPE_PRECISION (TREE_TYPE (loop->v))
371 >= TYPE_PRECISION (iter_type))
372 {
373 tree n;
374
375 if (loop->cond_code == LT_EXPR)
376 n = fold_build2_loc (loc,
377 PLUS_EXPR, TREE_TYPE (loop->v),
378 loop->n2, loop->step);
379 else
380 n = loop->n1;
381 if (TREE_CODE (n) != INTEGER_CST
382 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
383 iter_type = long_long_unsigned_type_node;
384 }
385 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
386 > TYPE_PRECISION (iter_type))
387 {
388 tree n1, n2;
389
390 if (loop->cond_code == LT_EXPR)
391 {
392 n1 = loop->n1;
393 n2 = fold_build2_loc (loc,
394 PLUS_EXPR, TREE_TYPE (loop->v),
395 loop->n2, loop->step);
396 }
397 else
398 {
399 n1 = fold_build2_loc (loc,
400 MINUS_EXPR, TREE_TYPE (loop->v),
401 loop->n2, loop->step);
402 n2 = loop->n1;
403 }
404 if (TREE_CODE (n1) != INTEGER_CST
405 || TREE_CODE (n2) != INTEGER_CST
406 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
407 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
408 iter_type = long_long_unsigned_type_node;
409 }
410 }
411
412 if (collapse_count && *collapse_count == NULL)
413 {
414 t = fold_binary (loop->cond_code, boolean_type_node,
415 fold_convert (TREE_TYPE (loop->v), loop->n1),
416 fold_convert (TREE_TYPE (loop->v), loop->n2));
417 if (t && integer_zerop (t))
418 count = build_zero_cst (long_long_unsigned_type_node);
419 else if ((i == 0 || count != NULL_TREE)
420 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
421 && TREE_CONSTANT (loop->n1)
422 && TREE_CONSTANT (loop->n2)
423 && TREE_CODE (loop->step) == INTEGER_CST)
424 {
425 tree itype = TREE_TYPE (loop->v);
426
427 if (POINTER_TYPE_P (itype))
428 itype = signed_type_for (itype);
429 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
430 t = fold_build2_loc (loc,
431 PLUS_EXPR, itype,
432 fold_convert_loc (loc, itype, loop->step), t);
433 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
434 fold_convert_loc (loc, itype, loop->n2));
435 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
436 fold_convert_loc (loc, itype, loop->n1));
437 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
438 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
439 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
440 fold_build1_loc (loc, NEGATE_EXPR, itype,
441 fold_convert_loc (loc, itype,
442 loop->step)));
443 else
444 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
445 fold_convert_loc (loc, itype, loop->step));
446 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
447 if (count != NULL_TREE)
448 count = fold_build2_loc (loc,
449 MULT_EXPR, long_long_unsigned_type_node,
450 count, t);
451 else
452 count = t;
453 if (TREE_CODE (count) != INTEGER_CST)
454 count = NULL_TREE;
455 }
456 else if (count && !integer_zerop (count))
457 count = NULL_TREE;
458 }
459 }
460
461 if (count
462 && !simd)
463 {
464 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
465 iter_type = long_long_unsigned_type_node;
466 else
467 iter_type = long_integer_type_node;
468 }
469 else if (collapse_iter && *collapse_iter != NULL)
470 iter_type = TREE_TYPE (*collapse_iter);
471 fd->iter_type = iter_type;
472 if (collapse_iter && *collapse_iter == NULL)
473 *collapse_iter = create_tmp_var (iter_type, ".iter");
474 if (collapse_count && *collapse_count == NULL)
475 {
476 if (count)
477 *collapse_count = fold_convert_loc (loc, iter_type, count);
478 else
479 *collapse_count = create_tmp_var (iter_type, ".count");
480 }
481
482 if (fd->collapse > 1)
483 {
484 fd->loop.v = *collapse_iter;
485 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
486 fd->loop.n2 = *collapse_count;
487 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
488 fd->loop.cond_code = LT_EXPR;
489 }
490 }
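
/* An illustrative example (values approximate, not real compiler output):
   for

	#pragma omp for schedule (dynamic, 4)
	for (i = 0; i < n; i++)

   this fills in roughly

	fd.loop.v = i, fd.loop.n1 = 0, fd.loop.n2 = n, fd.loop.step = 1,
	fd.loop.cond_code = LT_EXPR,
	fd.sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC, fd.chunk_size = 4,
	fd.collapse = 1, fd.iter_type = long.

   For collapse > 1 the individual loops land in fd->loops[] and fd->loop
   describes the single collapsed iteration space [0, collapse count).  */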
491
492
493 /* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
494 is the immediate dominator of PAR_ENTRY_BB, return true if there
495 are no data dependencies that would prevent expanding the parallel
496 directive at PAR_ENTRY_BB as a combined parallel+workshare region.
497
498 When expanding a combined parallel+workshare region, the call to
499 the child function may need additional arguments in the case of
500 GIMPLE_OMP_FOR regions. In some cases, these arguments are
501 computed out of variables passed in from the parent to the child
502 via 'struct .omp_data_s'. For instance:
503
504 #pragma omp parallel for schedule (guided, i * 4)
505 for (j ...)
506
507 Is lowered into:
508
509 # BLOCK 2 (PAR_ENTRY_BB)
510 .omp_data_o.i = i;
511 	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]
512
513 # BLOCK 3 (WS_ENTRY_BB)
514 .omp_data_i = &.omp_data_o;
515 D.1667 = .omp_data_i->i;
516 D.1598 = D.1667 * 4;
517 #pragma omp for schedule (guided, D.1598)
518
519 When we outline the parallel region, the call to the child function
520 'bar.omp_fn.0' will need the value D.1598 in its argument list, but
521 that value is computed *after* the call site. So, in principle we
522 cannot do the transformation.
523
524 To see whether the code in WS_ENTRY_BB blocks the combined
525 parallel+workshare call, we collect all the variables used in the
526    GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
527 statement in WS_ENTRY_BB. If so, then we cannot emit the combined
528 call.
529
530 FIXME. If we had the SSA form built at this point, we could merely
531 hoist the code in block 3 into block 2 and be done with it. But at
532 this point we don't have dataflow information and though we could
533 hack something up here, it is really not worth the aggravation. */
534
535 static bool
536 workshare_safe_to_combine_p (basic_block ws_entry_bb)
537 {
538 struct omp_for_data fd;
539 gimple ws_stmt = last_stmt (ws_entry_bb);
540
541 if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
542 return true;
543
544 gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);
545
546 extract_omp_for_data (ws_stmt, &fd, NULL);
547
548 if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
549 return false;
550 if (fd.iter_type != long_integer_type_node)
551 return false;
552
553 /* FIXME. We give up too easily here. If any of these arguments
554 are not constants, they will likely involve variables that have
555 been mapped into fields of .omp_data_s for sharing with the child
556 function. With appropriate data flow, it would be possible to
557 see through this. */
558 if (!is_gimple_min_invariant (fd.loop.n1)
559 || !is_gimple_min_invariant (fd.loop.n2)
560 || !is_gimple_min_invariant (fd.loop.step)
561 || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
562 return false;
563
564 return true;
565 }
566
567
568 /* Collect additional arguments needed to emit a combined
569 parallel+workshare call. WS_STMT is the workshare directive being
570 expanded. */
571
572 static vec<tree, va_gc> *
573 get_ws_args_for (gimple ws_stmt)
574 {
575 tree t;
576 location_t loc = gimple_location (ws_stmt);
577 vec<tree, va_gc> *ws_args;
578
579 if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
580 {
581 struct omp_for_data fd;
582
583 extract_omp_for_data (ws_stmt, &fd, NULL);
584
585 vec_alloc (ws_args, 3 + (fd.chunk_size != 0));
586
587 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
588 ws_args->quick_push (t);
589
590 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
591 ws_args->quick_push (t);
592
593 t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
594 ws_args->quick_push (t);
595
596 if (fd.chunk_size)
597 {
598 t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
599 ws_args->quick_push (t);
600 }
601
602 return ws_args;
603 }
604 else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
605 {
606 /* Number of sections is equal to the number of edges from the
607 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
608 the exit of the sections region. */
609 basic_block bb = single_succ (gimple_bb (ws_stmt));
610 t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
611 vec_alloc (ws_args, 1);
612 ws_args->quick_push (t);
613 return ws_args;
614 }
615
616 gcc_unreachable ();
617 }
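
/* Illustrative note: for a combined parallel loop the collected ws_args
   (n1, n2, step and, when present, chunk_size) become the extra trailing
   arguments of the GOMP_parallel_loop_*_start family of entry points,
   and for combined parallel sections the single argument is the section
   count passed to GOMP_parallel_sections_start.  See expand_parallel_call
   below for the exact call emission.  */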
618
619
620 /* Discover whether REGION is a combined parallel+workshare region. */
621
622 static void
623 determine_parallel_type (struct omp_region *region)
624 {
625 basic_block par_entry_bb, par_exit_bb;
626 basic_block ws_entry_bb, ws_exit_bb;
627
628 if (region == NULL || region->inner == NULL
629 || region->exit == NULL || region->inner->exit == NULL
630 || region->inner->cont == NULL)
631 return;
632
633 /* We only support parallel+for and parallel+sections. */
634 if (region->type != GIMPLE_OMP_PARALLEL
635 || (region->inner->type != GIMPLE_OMP_FOR
636 && region->inner->type != GIMPLE_OMP_SECTIONS))
637 return;
638
639 /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
640 WS_EXIT_BB -> PAR_EXIT_BB. */
641 par_entry_bb = region->entry;
642 par_exit_bb = region->exit;
643 ws_entry_bb = region->inner->entry;
644 ws_exit_bb = region->inner->exit;
645
646 if (single_succ (par_entry_bb) == ws_entry_bb
647 && single_succ (ws_exit_bb) == par_exit_bb
648 && workshare_safe_to_combine_p (ws_entry_bb)
649 && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
650 || (last_and_only_stmt (ws_entry_bb)
651 && last_and_only_stmt (par_exit_bb))))
652 {
653 gimple ws_stmt = last_stmt (ws_entry_bb);
654
655 if (region->inner->type == GIMPLE_OMP_FOR)
656 {
657 /* If this is a combined parallel loop, we need to determine
658 whether or not to use the combined library calls. There
659 are two cases where we do not apply the transformation:
660 static loops and any kind of ordered loop. In the first
661 case, we already open code the loop so there is no need
662 to do anything else. In the latter case, the combined
663 parallel loop call would still need extra synchronization
664 to implement ordered semantics, so there would not be any
665 gain in using the combined call. */
666 tree clauses = gimple_omp_for_clauses (ws_stmt);
667 tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
668 if (c == NULL
669 || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
670 || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
671 {
672 region->is_combined_parallel = false;
673 region->inner->is_combined_parallel = false;
674 return;
675 }
676 }
677
678 region->is_combined_parallel = true;
679 region->inner->is_combined_parallel = true;
680 region->ws_args = get_ws_args_for (ws_stmt);
681 }
682 }
683
684
685 /* Return true if EXPR is variable sized. */
686
687 static inline bool
688 is_variable_sized (const_tree expr)
689 {
690 return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
691 }
692
693 /* Return true if DECL is a reference type. */
694
695 static inline bool
696 is_reference (tree decl)
697 {
698 return lang_hooks.decls.omp_privatize_by_reference (decl);
699 }
700
701 /* Look up variables in the decl or field splay trees.  The "maybe" form
702    allows the variable to not have been entered; otherwise we assert
703    that the variable must have been entered.  */
704
705 static inline tree
706 lookup_decl (tree var, omp_context *ctx)
707 {
708 tree *n;
709 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
710 return *n;
711 }
712
713 static inline tree
714 maybe_lookup_decl (const_tree var, omp_context *ctx)
715 {
716 tree *n;
717 n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
718 return n ? *n : NULL_TREE;
719 }
720
721 static inline tree
722 lookup_field (tree var, omp_context *ctx)
723 {
724 splay_tree_node n;
725 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
726 return (tree) n->value;
727 }
728
729 static inline tree
730 lookup_sfield (tree var, omp_context *ctx)
731 {
732 splay_tree_node n;
733 n = splay_tree_lookup (ctx->sfield_map
734 ? ctx->sfield_map : ctx->field_map,
735 (splay_tree_key) var);
736 return (tree) n->value;
737 }
738
739 static inline tree
740 maybe_lookup_field (tree var, omp_context *ctx)
741 {
742 splay_tree_node n;
743 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
744 return n ? (tree) n->value : NULL_TREE;
745 }
746
747 /* Return true if DECL should be copied by pointer. SHARED_CTX is
748 the parallel context if DECL is to be shared. */
749
750 static bool
751 use_pointer_for_field (tree decl, omp_context *shared_ctx)
752 {
753 if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
754 return true;
755
756 /* We can only use copy-in/copy-out semantics for shared variables
757 when we know the value is not accessible from an outer scope. */
758 if (shared_ctx)
759 {
760 /* ??? Trivially accessible from anywhere. But why would we even
761 be passing an address in this case? Should we simply assert
762 this to be false, or should we have a cleanup pass that removes
763 these from the list of mappings? */
764 if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
765 return true;
766
767 /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
768 without analyzing the expression whether or not its location
769 is accessible to anyone else. In the case of nested parallel
770 regions it certainly may be. */
771 if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
772 return true;
773
774 /* Do not use copy-in/copy-out for variables that have their
775 address taken. */
776 if (TREE_ADDRESSABLE (decl))
777 return true;
778
779 /* lower_send_shared_vars only uses copy-in, but not copy-out
780 for these. */
781 if (TREE_READONLY (decl)
782 || ((TREE_CODE (decl) == RESULT_DECL
783 || TREE_CODE (decl) == PARM_DECL)
784 && DECL_BY_REFERENCE (decl)))
785 return false;
786
787 /* Disallow copy-in/out in nested parallel if
788 decl is shared in outer parallel, otherwise
789 each thread could store the shared variable
790 in its own copy-in location, making the
791 variable no longer really shared. */
792 if (shared_ctx->is_nested)
793 {
794 omp_context *up;
795
796 for (up = shared_ctx->outer; up; up = up->outer)
797 if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
798 break;
799
800 if (up)
801 {
802 tree c;
803
804 for (c = gimple_omp_taskreg_clauses (up->stmt);
805 c; c = OMP_CLAUSE_CHAIN (c))
806 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
807 && OMP_CLAUSE_DECL (c) == decl)
808 break;
809
810 if (c)
811 goto maybe_mark_addressable_and_ret;
812 }
813 }
814
815 /* For tasks avoid using copy-in/out. As tasks can be
816 	 deferred or executed in a different thread, when GOMP_task
817 returns, the task hasn't necessarily terminated. */
818 if (is_task_ctx (shared_ctx))
819 {
820 tree outer;
821 maybe_mark_addressable_and_ret:
822 outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
823 if (is_gimple_reg (outer))
824 {
825 /* Taking address of OUTER in lower_send_shared_vars
826 might need regimplification of everything that uses the
827 variable. */
828 if (!task_shared_vars)
829 task_shared_vars = BITMAP_ALLOC (NULL);
830 bitmap_set_bit (task_shared_vars, DECL_UID (outer));
831 TREE_ADDRESSABLE (outer) = 1;
832 }
833 return true;
834 }
835 }
836
837 return false;
838 }
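
/* Illustrative examples of the rules above (not exhaustive): a plain
   local "int x" named in a shared clause can use copy-in/copy-out, so
   its .omp_data_s field is simply an int; the same variable with its
   address taken, or any aggregate, is passed by pointer instead (field
   type "int *", resp. a pointer to the aggregate), and the child
   dereferences the field to reach the original object.  */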
839
840 /* Create a new VAR_DECL and copy information from VAR to it. */
841
842 tree
843 copy_var_decl (tree var, tree name, tree type)
844 {
845 tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);
846
847 TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
848 TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
849 DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
850 DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
851 DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
852 DECL_CONTEXT (copy) = DECL_CONTEXT (var);
853 TREE_NO_WARNING (copy) = TREE_NO_WARNING (var);
854 TREE_USED (copy) = 1;
855 DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;
856 DECL_ATTRIBUTES (copy) = DECL_ATTRIBUTES (var);
857
858 return copy;
859 }
860
861 /* Construct a new automatic decl similar to VAR. */
862
863 static tree
864 omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
865 {
866 tree copy = copy_var_decl (var, name, type);
867
868 DECL_CONTEXT (copy) = current_function_decl;
869 DECL_CHAIN (copy) = ctx->block_vars;
870 ctx->block_vars = copy;
871
872 return copy;
873 }
874
875 static tree
876 omp_copy_decl_1 (tree var, omp_context *ctx)
877 {
878 return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
879 }
880
881 /* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
882 as appropriate. */
883 static tree
884 omp_build_component_ref (tree obj, tree field)
885 {
886 tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
887 if (TREE_THIS_VOLATILE (field))
888 TREE_THIS_VOLATILE (ret) |= 1;
889 if (TREE_READONLY (field))
890 TREE_READONLY (ret) |= 1;
891 return ret;
892 }
893
894 /* Build tree nodes to access the field for VAR on the receiver side. */
895
896 static tree
897 build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
898 {
899 tree x, field = lookup_field (var, ctx);
900
901 /* If the receiver record type was remapped in the child function,
902 remap the field into the new record type. */
903 x = maybe_lookup_field (field, ctx);
904 if (x != NULL)
905 field = x;
906
907 x = build_simple_mem_ref (ctx->receiver_decl);
908 x = omp_build_component_ref (x, field);
909 if (by_ref)
910 x = build_simple_mem_ref (x);
911
912 return x;
913 }
914
915 /* Build tree nodes to access VAR in the scope outer to CTX. In the case
916 of a parallel, this is a component reference; for workshare constructs
917 this is some variable. */
918
919 static tree
920 build_outer_var_ref (tree var, omp_context *ctx)
921 {
922 tree x;
923
924 if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
925 x = var;
926 else if (is_variable_sized (var))
927 {
928 x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
929 x = build_outer_var_ref (x, ctx);
930 x = build_simple_mem_ref (x);
931 }
932 else if (is_taskreg_ctx (ctx))
933 {
934 bool by_ref = use_pointer_for_field (var, NULL);
935 x = build_receiver_ref (var, by_ref, ctx);
936 }
937 else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
938 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
939 {
940 /* #pragma omp simd isn't a worksharing construct, and can reference even
941 private vars in its linear etc. clauses. */
942 x = NULL_TREE;
943 if (ctx->outer && is_taskreg_ctx (ctx))
944 x = lookup_decl (var, ctx->outer);
945 else if (ctx->outer)
946 x = maybe_lookup_decl_in_outer_ctx (var, ctx);
947 if (x == NULL_TREE)
948 x = var;
949 }
950 else if (ctx->outer)
951 x = lookup_decl (var, ctx->outer);
952 else if (is_reference (var))
953   /* This can happen with orphaned constructs.  If var is a reference, it is
954 possible it is shared and as such valid. */
955 x = var;
956 else
957 gcc_unreachable ();
958
959 if (is_reference (var))
960 x = build_simple_mem_ref (x);
961
962 return x;
963 }
964
965 /* Build tree nodes to access the field for VAR on the sender side. */
966
967 static tree
968 build_sender_ref (tree var, omp_context *ctx)
969 {
970 tree field = lookup_sfield (var, ctx);
971 return omp_build_component_ref (ctx->sender_decl, field);
972 }
973
974 /* Add a new field for VAR inside the structure CTX->SENDER_DECL. */
975
976 static void
977 install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
978 {
979 tree field, type, sfield = NULL_TREE;
980
981 gcc_assert ((mask & 1) == 0
982 || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
983 gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
984 || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));
985
986 type = TREE_TYPE (var);
987 if (by_ref)
988 type = build_pointer_type (type);
989 else if ((mask & 3) == 1 && is_reference (var))
990 type = TREE_TYPE (type);
991
992 field = build_decl (DECL_SOURCE_LOCATION (var),
993 FIELD_DECL, DECL_NAME (var), type);
994
995 /* Remember what variable this field was created for. This does have a
996 side effect of making dwarf2out ignore this member, so for helpful
997 debugging we clear it later in delete_omp_context. */
998 DECL_ABSTRACT_ORIGIN (field) = var;
999 if (type == TREE_TYPE (var))
1000 {
1001 DECL_ALIGN (field) = DECL_ALIGN (var);
1002 DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
1003 TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
1004 }
1005 else
1006 DECL_ALIGN (field) = TYPE_ALIGN (type);
1007
1008 if ((mask & 3) == 3)
1009 {
1010 insert_field_into_struct (ctx->record_type, field);
1011 if (ctx->srecord_type)
1012 {
1013 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1014 FIELD_DECL, DECL_NAME (var), type);
1015 DECL_ABSTRACT_ORIGIN (sfield) = var;
1016 DECL_ALIGN (sfield) = DECL_ALIGN (field);
1017 DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
1018 TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
1019 insert_field_into_struct (ctx->srecord_type, sfield);
1020 }
1021 }
1022 else
1023 {
1024 if (ctx->srecord_type == NULL_TREE)
1025 {
1026 tree t;
1027
1028 ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
1029 ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1030 for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
1031 {
1032 sfield = build_decl (DECL_SOURCE_LOCATION (var),
1033 FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
1034 DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
1035 insert_field_into_struct (ctx->srecord_type, sfield);
1036 splay_tree_insert (ctx->sfield_map,
1037 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
1038 (splay_tree_value) sfield);
1039 }
1040 }
1041 sfield = field;
1042 insert_field_into_struct ((mask & 1) ? ctx->record_type
1043 : ctx->srecord_type, field);
1044 }
1045
1046 if (mask & 1)
1047 splay_tree_insert (ctx->field_map, (splay_tree_key) var,
1048 (splay_tree_value) field);
1049 if ((mask & 2) && ctx->sfield_map)
1050 splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
1051 (splay_tree_value) sfield);
1052 }
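
/* A short key to the MASK argument, derived from the code above: bit 1
   places the variable's field in ctx->record_type (the block the child
   function receives), bit 2 places it in ctx->srecord_type (the block
   the task firstprivate fn reads from), and mask 3 puts it in both.
   E.g. scan_sharing_clauses below uses

	install_var_field (decl, by_ref, 3, ctx);

   for shared variables, and masks 1 or 2 for the task-only cases.  */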
1053
1054 static tree
1055 install_var_local (tree var, omp_context *ctx)
1056 {
1057 tree new_var = omp_copy_decl_1 (var, ctx);
1058 insert_decl_map (&ctx->cb, var, new_var);
1059 return new_var;
1060 }
1061
1062 /* Adjust the replacement for DECL in CTX for the new context. This means
1063 copying the DECL_VALUE_EXPR, and fixing up the type. */
1064
1065 static void
1066 fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
1067 {
1068 tree new_decl, size;
1069
1070 new_decl = lookup_decl (decl, ctx);
1071
1072 TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
1073
1074 if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
1075 && DECL_HAS_VALUE_EXPR_P (decl))
1076 {
1077 tree ve = DECL_VALUE_EXPR (decl);
1078 walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
1079 SET_DECL_VALUE_EXPR (new_decl, ve);
1080 DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
1081 }
1082
1083 if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
1084 {
1085 size = remap_decl (DECL_SIZE (decl), &ctx->cb);
1086 if (size == error_mark_node)
1087 size = TYPE_SIZE (TREE_TYPE (new_decl));
1088 DECL_SIZE (new_decl) = size;
1089
1090 size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
1091 if (size == error_mark_node)
1092 size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
1093 DECL_SIZE_UNIT (new_decl) = size;
1094 }
1095 }
1096
1097 /* The callback for remap_decl. Search all containing contexts for a
1098 mapping of the variable; this avoids having to duplicate the splay
1099 tree ahead of time. We know a mapping doesn't already exist in the
1100 given context. Create new mappings to implement default semantics. */
1101
1102 static tree
1103 omp_copy_decl (tree var, copy_body_data *cb)
1104 {
1105 omp_context *ctx = (omp_context *) cb;
1106 tree new_var;
1107
1108 if (TREE_CODE (var) == LABEL_DECL)
1109 {
1110 new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
1111 DECL_CONTEXT (new_var) = current_function_decl;
1112 insert_decl_map (&ctx->cb, var, new_var);
1113 return new_var;
1114 }
1115
1116 while (!is_taskreg_ctx (ctx))
1117 {
1118 ctx = ctx->outer;
1119 if (ctx == NULL)
1120 return var;
1121 new_var = maybe_lookup_decl (var, ctx);
1122 if (new_var)
1123 return new_var;
1124 }
1125
1126 if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
1127 return var;
1128
1129 return error_mark_node;
1130 }
1131
1132
1133 /* Return the parallel region associated with STMT. */
1134
1135 /* Debugging dumps for parallel regions. */
1136 void dump_omp_region (FILE *, struct omp_region *, int);
1137 void debug_omp_region (struct omp_region *);
1138 void debug_all_omp_regions (void);
1139
1140 /* Dump the parallel region tree rooted at REGION. */
1141
1142 void
1143 dump_omp_region (FILE *file, struct omp_region *region, int indent)
1144 {
1145 fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
1146 gimple_code_name[region->type]);
1147
1148 if (region->inner)
1149 dump_omp_region (file, region->inner, indent + 4);
1150
1151 if (region->cont)
1152 {
1153 fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
1154 region->cont->index);
1155 }
1156
1157 if (region->exit)
1158 fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
1159 region->exit->index);
1160 else
1161 fprintf (file, "%*s[no exit marker]\n", indent, "");
1162
1163 if (region->next)
1164 dump_omp_region (file, region->next, indent);
1165 }
1166
1167 DEBUG_FUNCTION void
1168 debug_omp_region (struct omp_region *region)
1169 {
1170 dump_omp_region (stderr, region, 0);
1171 }
1172
1173 DEBUG_FUNCTION void
1174 debug_all_omp_regions (void)
1175 {
1176 dump_omp_region (stderr, root_omp_region, 0);
1177 }
1178
1179
1180 /* Create a new parallel region starting at STMT inside region PARENT. */
1181
1182 struct omp_region *
1183 new_omp_region (basic_block bb, enum gimple_code type,
1184 struct omp_region *parent)
1185 {
1186 struct omp_region *region = XCNEW (struct omp_region);
1187
1188 region->outer = parent;
1189 region->entry = bb;
1190 region->type = type;
1191
1192 if (parent)
1193 {
1194 /* This is a nested region. Add it to the list of inner
1195 regions in PARENT. */
1196 region->next = parent->inner;
1197 parent->inner = region;
1198 }
1199 else
1200 {
1201 /* This is a toplevel region. Add it to the list of toplevel
1202 regions in ROOT_OMP_REGION. */
1203 region->next = root_omp_region;
1204 root_omp_region = region;
1205 }
1206
1207 return region;
1208 }
1209
1210 /* Release the memory associated with the region tree rooted at REGION. */
1211
1212 static void
1213 free_omp_region_1 (struct omp_region *region)
1214 {
1215 struct omp_region *i, *n;
1216
1217 for (i = region->inner; i ; i = n)
1218 {
1219 n = i->next;
1220 free_omp_region_1 (i);
1221 }
1222
1223 free (region);
1224 }
1225
1226 /* Release the memory for the entire omp region tree. */
1227
1228 void
1229 free_omp_regions (void)
1230 {
1231 struct omp_region *r, *n;
1232 for (r = root_omp_region; r ; r = n)
1233 {
1234 n = r->next;
1235 free_omp_region_1 (r);
1236 }
1237 root_omp_region = NULL;
1238 }
1239
1240
1241 /* Create a new context, with OUTER_CTX being the surrounding context. */
1242
1243 static omp_context *
1244 new_omp_context (gimple stmt, omp_context *outer_ctx)
1245 {
1246 omp_context *ctx = XCNEW (omp_context);
1247
1248 splay_tree_insert (all_contexts, (splay_tree_key) stmt,
1249 (splay_tree_value) ctx);
1250 ctx->stmt = stmt;
1251
1252 if (outer_ctx)
1253 {
1254 ctx->outer = outer_ctx;
1255 ctx->cb = outer_ctx->cb;
1256 ctx->cb.block = NULL;
1257 ctx->depth = outer_ctx->depth + 1;
1258 }
1259 else
1260 {
1261 ctx->cb.src_fn = current_function_decl;
1262 ctx->cb.dst_fn = current_function_decl;
1263 ctx->cb.src_node = cgraph_get_node (current_function_decl);
1264 gcc_checking_assert (ctx->cb.src_node);
1265 ctx->cb.dst_node = ctx->cb.src_node;
1266 ctx->cb.src_cfun = cfun;
1267 ctx->cb.copy_decl = omp_copy_decl;
1268 ctx->cb.eh_lp_nr = 0;
1269 ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
1270 ctx->depth = 1;
1271 }
1272
1273 ctx->cb.decl_map = pointer_map_create ();
1274
1275 return ctx;
1276 }
1277
1278 static gimple_seq maybe_catch_exception (gimple_seq);
1279
1280 /* Finalize task copyfn. */
1281
1282 static void
1283 finalize_task_copyfn (gimple task_stmt)
1284 {
1285 struct function *child_cfun;
1286 tree child_fn;
1287 gimple_seq seq = NULL, new_seq;
1288 gimple bind;
1289
1290 child_fn = gimple_omp_task_copy_fn (task_stmt);
1291 if (child_fn == NULL_TREE)
1292 return;
1293
1294 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
1295 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
1296
1297 push_cfun (child_cfun);
1298 bind = gimplify_body (child_fn, false);
1299 gimple_seq_add_stmt (&seq, bind);
1300 new_seq = maybe_catch_exception (seq);
1301 if (new_seq != seq)
1302 {
1303 bind = gimple_build_bind (NULL, new_seq, NULL);
1304 seq = NULL;
1305 gimple_seq_add_stmt (&seq, bind);
1306 }
1307 gimple_set_body (child_fn, seq);
1308 pop_cfun ();
1309
1310 /* Inform the callgraph about the new function. */
1311 cgraph_add_new_function (child_fn, false);
1312 }
1313
1314 /* Destroy an omp_context data structure.  Called through the splay tree
1315 value delete callback. */
1316
1317 static void
1318 delete_omp_context (splay_tree_value value)
1319 {
1320 omp_context *ctx = (omp_context *) value;
1321
1322 pointer_map_destroy (ctx->cb.decl_map);
1323
1324 if (ctx->field_map)
1325 splay_tree_delete (ctx->field_map);
1326 if (ctx->sfield_map)
1327 splay_tree_delete (ctx->sfield_map);
1328
1329 /* We hijacked DECL_ABSTRACT_ORIGIN earlier. We need to clear it before
1330 it produces corrupt debug information. */
1331 if (ctx->record_type)
1332 {
1333 tree t;
1334 for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
1335 DECL_ABSTRACT_ORIGIN (t) = NULL;
1336 }
1337 if (ctx->srecord_type)
1338 {
1339 tree t;
1340 for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
1341 DECL_ABSTRACT_ORIGIN (t) = NULL;
1342 }
1343
1344 if (is_task_ctx (ctx))
1345 finalize_task_copyfn (ctx->stmt);
1346
1347 XDELETE (ctx);
1348 }
1349
1350 /* Fix up RECEIVER_DECL with a type that has been remapped to the child
1351 context. */
1352
1353 static void
1354 fixup_child_record_type (omp_context *ctx)
1355 {
1356 tree f, type = ctx->record_type;
1357
1358 /* ??? It isn't sufficient to just call remap_type here, because
1359 variably_modified_type_p doesn't work the way we expect for
1360 record types. Testing each field for whether it needs remapping
1361 and creating a new record by hand works, however. */
1362 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
1363 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
1364 break;
1365 if (f)
1366 {
1367 tree name, new_fields = NULL;
1368
1369 type = lang_hooks.types.make_type (RECORD_TYPE);
1370 name = DECL_NAME (TYPE_NAME (ctx->record_type));
1371 name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
1372 TYPE_DECL, name, type);
1373 TYPE_NAME (type) = name;
1374
1375 for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
1376 {
1377 tree new_f = copy_node (f);
1378 DECL_CONTEXT (new_f) = type;
1379 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
1380 DECL_CHAIN (new_f) = new_fields;
1381 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
1382 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
1383 &ctx->cb, NULL);
1384 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
1385 &ctx->cb, NULL);
1386 new_fields = new_f;
1387
1388 /* Arrange to be able to look up the receiver field
1389 given the sender field. */
1390 splay_tree_insert (ctx->field_map, (splay_tree_key) f,
1391 (splay_tree_value) new_f);
1392 }
1393 TYPE_FIELDS (type) = nreverse (new_fields);
1394 layout_type (type);
1395 }
1396
1397 TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
1398 }
1399
1400 /* Instantiate decls as necessary in CTX to satisfy the data sharing
1401 specified by CLAUSES. */
1402
1403 static void
1404 scan_sharing_clauses (tree clauses, omp_context *ctx)
1405 {
1406 tree c, decl;
1407 bool scan_array_reductions = false;
1408
1409 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1410 {
1411 bool by_ref;
1412
1413 switch (OMP_CLAUSE_CODE (c))
1414 {
1415 case OMP_CLAUSE_PRIVATE:
1416 decl = OMP_CLAUSE_DECL (c);
1417 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
1418 goto do_private;
1419 else if (!is_variable_sized (decl))
1420 install_var_local (decl, ctx);
1421 break;
1422
1423 case OMP_CLAUSE_SHARED:
1424 gcc_assert (is_taskreg_ctx (ctx));
1425 decl = OMP_CLAUSE_DECL (c);
1426 gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
1427 || !is_variable_sized (decl));
1428 /* Global variables don't need to be copied,
1429 the receiver side will use them directly. */
1430 if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1431 break;
1432 by_ref = use_pointer_for_field (decl, ctx);
1433 if (! TREE_READONLY (decl)
1434 || TREE_ADDRESSABLE (decl)
1435 || by_ref
1436 || is_reference (decl))
1437 {
1438 install_var_field (decl, by_ref, 3, ctx);
1439 install_var_local (decl, ctx);
1440 break;
1441 }
1442 /* We don't need to copy const scalar vars back. */
1443 OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
1444 goto do_private;
1445
1446 case OMP_CLAUSE_LASTPRIVATE:
1447 /* Let the corresponding firstprivate clause create
1448 the variable. */
1449 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1450 break;
1451 /* FALLTHRU */
1452
1453 case OMP_CLAUSE_FIRSTPRIVATE:
1454 case OMP_CLAUSE_REDUCTION:
1455 case OMP_CLAUSE_LINEAR:
1456 decl = OMP_CLAUSE_DECL (c);
1457 do_private:
1458 if (is_variable_sized (decl))
1459 {
1460 if (is_task_ctx (ctx))
1461 install_var_field (decl, false, 1, ctx);
1462 break;
1463 }
1464 else if (is_taskreg_ctx (ctx))
1465 {
1466 bool global
1467 = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
1468 by_ref = use_pointer_for_field (decl, NULL);
1469
1470 if (is_task_ctx (ctx)
1471 && (global || by_ref || is_reference (decl)))
1472 {
1473 install_var_field (decl, false, 1, ctx);
1474 if (!global)
1475 install_var_field (decl, by_ref, 2, ctx);
1476 }
1477 else if (!global)
1478 install_var_field (decl, by_ref, 3, ctx);
1479 }
1480 install_var_local (decl, ctx);
1481 break;
1482
1483 case OMP_CLAUSE_COPYPRIVATE:
1484 case OMP_CLAUSE_COPYIN:
1485 decl = OMP_CLAUSE_DECL (c);
1486 by_ref = use_pointer_for_field (decl, NULL);
1487 install_var_field (decl, by_ref, 3, ctx);
1488 break;
1489
1490 case OMP_CLAUSE_DEFAULT:
1491 ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
1492 break;
1493
1494 case OMP_CLAUSE_FINAL:
1495 case OMP_CLAUSE_IF:
1496 case OMP_CLAUSE_NUM_THREADS:
1497 case OMP_CLAUSE_SCHEDULE:
1498 if (ctx->outer)
1499 scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
1500 break;
1501
1502 case OMP_CLAUSE_NOWAIT:
1503 case OMP_CLAUSE_ORDERED:
1504 case OMP_CLAUSE_COLLAPSE:
1505 case OMP_CLAUSE_UNTIED:
1506 case OMP_CLAUSE_MERGEABLE:
1507 case OMP_CLAUSE_SAFELEN:
1508 break;
1509
1510 default:
1511 gcc_unreachable ();
1512 }
1513 }
1514
1515 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1516 {
1517 switch (OMP_CLAUSE_CODE (c))
1518 {
1519 case OMP_CLAUSE_LASTPRIVATE:
1520 /* Let the corresponding firstprivate clause create
1521 the variable. */
1522 if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1523 scan_array_reductions = true;
1524 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
1525 break;
1526 /* FALLTHRU */
1527
1528 case OMP_CLAUSE_PRIVATE:
1529 case OMP_CLAUSE_FIRSTPRIVATE:
1530 case OMP_CLAUSE_REDUCTION:
1531 case OMP_CLAUSE_LINEAR:
1532 decl = OMP_CLAUSE_DECL (c);
1533 if (is_variable_sized (decl))
1534 install_var_local (decl, ctx);
1535 fixup_remapped_decl (decl, ctx,
1536 OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
1537 && OMP_CLAUSE_PRIVATE_DEBUG (c));
1538 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1539 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1540 scan_array_reductions = true;
1541 break;
1542
1543 case OMP_CLAUSE_SHARED:
1544 decl = OMP_CLAUSE_DECL (c);
1545 if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
1546 fixup_remapped_decl (decl, ctx, false);
1547 break;
1548
1549 case OMP_CLAUSE_COPYPRIVATE:
1550 case OMP_CLAUSE_COPYIN:
1551 case OMP_CLAUSE_DEFAULT:
1552 case OMP_CLAUSE_IF:
1553 case OMP_CLAUSE_NUM_THREADS:
1554 case OMP_CLAUSE_SCHEDULE:
1555 case OMP_CLAUSE_NOWAIT:
1556 case OMP_CLAUSE_ORDERED:
1557 case OMP_CLAUSE_COLLAPSE:
1558 case OMP_CLAUSE_UNTIED:
1559 case OMP_CLAUSE_FINAL:
1560 case OMP_CLAUSE_MERGEABLE:
1561 case OMP_CLAUSE_SAFELEN:
1562 break;
1563
1564 default:
1565 gcc_unreachable ();
1566 }
1567 }
1568
1569 if (scan_array_reductions)
1570 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
1571 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
1572 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
1573 {
1574 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
1575 scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
1576 }
1577 else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
1578 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
1579 scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
1580 }
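
/* A concrete, simplified picture of the two passes above: on

	#pragma omp parallel shared (a) firstprivate (b)

   the first loop creates fields for A and B in the .omp_data_s record
   (by value or by reference according to use_pointer_for_field) and
   local replacement decls for them in the child context; the second
   loop then fixes up the types and value-exprs of those replacements
   once all mappings exist.  */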
1581
1582 /* Create a new name for the omp child function.  Returns an identifier.  */
1583
1584 static GTY(()) unsigned int tmp_ompfn_id_num;
1585
1586 static tree
1587 create_omp_child_function_name (bool task_copy)
1588 {
1589 return (clone_function_name (current_function_decl,
1590 task_copy ? "_omp_cpyfn" : "_omp_fn"));
1591 }
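
/* For reference: clone_function_name produces a dot-suffixed, numbered
   clone name, so the first parallel child of a function foo is typically
   named "foo._omp_fn.0" and a task copy function "foo._omp_cpyfn.N"
   (the trailing number comes from a per-translation-unit counter).  */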
1592
1593 /* Build a decl for the omp child function.  It won't contain a body
1594 yet, just the bare decl. */
1595
1596 static void
1597 create_omp_child_function (omp_context *ctx, bool task_copy)
1598 {
1599 tree decl, type, name, t;
1600
1601 name = create_omp_child_function_name (task_copy);
1602 if (task_copy)
1603 type = build_function_type_list (void_type_node, ptr_type_node,
1604 ptr_type_node, NULL_TREE);
1605 else
1606 type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1607
1608 decl = build_decl (gimple_location (ctx->stmt),
1609 FUNCTION_DECL, name, type);
1610
1611 if (!task_copy)
1612 ctx->cb.dst_fn = decl;
1613 else
1614 gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1615
1616 TREE_STATIC (decl) = 1;
1617 TREE_USED (decl) = 1;
1618 DECL_ARTIFICIAL (decl) = 1;
1619 DECL_NAMELESS (decl) = 1;
1620 DECL_IGNORED_P (decl) = 0;
1621 TREE_PUBLIC (decl) = 0;
1622 DECL_UNINLINABLE (decl) = 1;
1623 DECL_EXTERNAL (decl) = 0;
1624 DECL_CONTEXT (decl) = NULL_TREE;
1625 DECL_INITIAL (decl) = make_node (BLOCK);
1626
1627 t = build_decl (DECL_SOURCE_LOCATION (decl),
1628 RESULT_DECL, NULL_TREE, void_type_node);
1629 DECL_ARTIFICIAL (t) = 1;
1630 DECL_IGNORED_P (t) = 1;
1631 DECL_CONTEXT (t) = decl;
1632 DECL_RESULT (decl) = t;
1633
1634 t = build_decl (DECL_SOURCE_LOCATION (decl),
1635 PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1636 DECL_ARTIFICIAL (t) = 1;
1637 DECL_NAMELESS (t) = 1;
1638 DECL_ARG_TYPE (t) = ptr_type_node;
1639 DECL_CONTEXT (t) = current_function_decl;
1640 TREE_USED (t) = 1;
1641 DECL_ARGUMENTS (decl) = t;
1642 if (!task_copy)
1643 ctx->receiver_decl = t;
1644 else
1645 {
1646 t = build_decl (DECL_SOURCE_LOCATION (decl),
1647 PARM_DECL, get_identifier (".omp_data_o"),
1648 ptr_type_node);
1649 DECL_ARTIFICIAL (t) = 1;
1650 DECL_NAMELESS (t) = 1;
1651 DECL_ARG_TYPE (t) = ptr_type_node;
1652 DECL_CONTEXT (t) = current_function_decl;
1653 TREE_USED (t) = 1;
1654 TREE_ADDRESSABLE (t) = 1;
1655 DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1656 DECL_ARGUMENTS (decl) = t;
1657 }
1658
1659 /* Allocate memory for the function structure. The call to
1660 allocate_struct_function clobbers CFUN, so we need to restore
1661 it afterward. */
1662 push_struct_function (decl);
1663 cfun->function_end_locus = gimple_location (ctx->stmt);
1664 pop_cfun ();
1665 }
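
/* The resulting child decl therefore has, schematically, the shape

	static void foo._omp_fn.N (void *.omp_data_i);

   or, for a task copy function,

	static void foo._omp_cpyfn.N (void *.omp_data_o, void *.omp_data_i);

   the pointer parameters are retyped later (see fixup_child_record_type
   and the task lowering code) to point to the remapped data record.  */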
1666
1667 /* Scan an OpenMP parallel directive. */
1668
1669 static void
1670 scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1671 {
1672 omp_context *ctx;
1673 tree name;
1674 gimple stmt = gsi_stmt (*gsi);
1675
1676 /* Ignore parallel directives with empty bodies, unless there
1677 are copyin clauses. */
1678 if (optimize > 0
1679 && empty_body_p (gimple_omp_body (stmt))
1680 && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1681 OMP_CLAUSE_COPYIN) == NULL)
1682 {
1683 gsi_replace (gsi, gimple_build_nop (), false);
1684 return;
1685 }
1686
1687 ctx = new_omp_context (stmt, outer_ctx);
1688 if (taskreg_nesting_level > 1)
1689 ctx->is_nested = true;
1690 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1691 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1692 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1693 name = create_tmp_var_name (".omp_data_s");
1694 name = build_decl (gimple_location (stmt),
1695 TYPE_DECL, name, ctx->record_type);
1696 DECL_ARTIFICIAL (name) = 1;
1697 DECL_NAMELESS (name) = 1;
1698 TYPE_NAME (ctx->record_type) = name;
1699 create_omp_child_function (ctx, false);
1700 gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1701
1702 scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1703 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1704
1705 if (TYPE_FIELDS (ctx->record_type) == NULL)
1706 ctx->record_type = ctx->receiver_decl = NULL;
1707 else
1708 {
1709 layout_type (ctx->record_type);
1710 fixup_child_record_type (ctx);
1711 }
1712 }
1713
1714 /* Scan an OpenMP task directive. */
1715
1716 static void
1717 scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1718 {
1719 omp_context *ctx;
1720 tree name, t;
1721 gimple stmt = gsi_stmt (*gsi);
1722 location_t loc = gimple_location (stmt);
1723
1724 /* Ignore task directives with empty bodies. */
1725 if (optimize > 0
1726 && empty_body_p (gimple_omp_body (stmt)))
1727 {
1728 gsi_replace (gsi, gimple_build_nop (), false);
1729 return;
1730 }
1731
1732 ctx = new_omp_context (stmt, outer_ctx);
1733 if (taskreg_nesting_level > 1)
1734 ctx->is_nested = true;
1735 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1736 ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1737 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1738 name = create_tmp_var_name (".omp_data_s");
1739 name = build_decl (gimple_location (stmt),
1740 TYPE_DECL, name, ctx->record_type);
1741 DECL_ARTIFICIAL (name) = 1;
1742 DECL_NAMELESS (name) = 1;
1743 TYPE_NAME (ctx->record_type) = name;
1744 create_omp_child_function (ctx, false);
1745 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1746
1747 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1748
1749 if (ctx->srecord_type)
1750 {
1751 name = create_tmp_var_name (".omp_data_a");
1752 name = build_decl (gimple_location (stmt),
1753 TYPE_DECL, name, ctx->srecord_type);
1754 DECL_ARTIFICIAL (name) = 1;
1755 DECL_NAMELESS (name) = 1;
1756 TYPE_NAME (ctx->srecord_type) = name;
1757 create_omp_child_function (ctx, true);
1758 }
1759
1760 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1761
1762 if (TYPE_FIELDS (ctx->record_type) == NULL)
1763 {
1764 ctx->record_type = ctx->receiver_decl = NULL;
1765 t = build_int_cst (long_integer_type_node, 0);
1766 gimple_omp_task_set_arg_size (stmt, t);
1767 t = build_int_cst (long_integer_type_node, 1);
1768 gimple_omp_task_set_arg_align (stmt, t);
1769 }
1770 else
1771 {
1772 tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1773 /* Move VLA fields to the end. */
1774 p = &TYPE_FIELDS (ctx->record_type);
1775 while (*p)
1776 if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1777 || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1778 {
1779 *q = *p;
1780 *p = TREE_CHAIN (*p);
1781 TREE_CHAIN (*q) = NULL_TREE;
1782 q = &TREE_CHAIN (*q);
1783 }
1784 else
1785 p = &DECL_CHAIN (*p);
1786 *p = vla_fields;
1787 layout_type (ctx->record_type);
1788 fixup_child_record_type (ctx);
1789 if (ctx->srecord_type)
1790 layout_type (ctx->srecord_type);
1791 t = fold_convert_loc (loc, long_integer_type_node,
1792 TYPE_SIZE_UNIT (ctx->record_type));
1793 gimple_omp_task_set_arg_size (stmt, t);
1794 t = build_int_cst (long_integer_type_node,
1795 TYPE_ALIGN_UNIT (ctx->record_type));
1796 gimple_omp_task_set_arg_align (stmt, t);
1797 }
1798 }
1799
1800
1801 /* Scan an OpenMP loop directive. */
1802
1803 static void
1804 scan_omp_for (gimple stmt, omp_context *outer_ctx)
1805 {
1806 omp_context *ctx;
1807 size_t i;
1808
1809 ctx = new_omp_context (stmt, outer_ctx);
1810
1811 scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1812
1813 scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
1814 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1815 {
1816 scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1817 scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1818 scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1819 scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1820 }
1821 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1822 }
1823
1824 /* Scan an OpenMP sections directive. */
1825
1826 static void
1827 scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1828 {
1829 omp_context *ctx;
1830
1831 ctx = new_omp_context (stmt, outer_ctx);
1832 scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1833 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1834 }
1835
1836 /* Scan an OpenMP single directive. */
1837
1838 static void
1839 scan_omp_single (gimple stmt, omp_context *outer_ctx)
1840 {
1841 omp_context *ctx;
1842 tree name;
1843
1844 ctx = new_omp_context (stmt, outer_ctx);
1845 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1846 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1847 name = create_tmp_var_name (".omp_copy_s");
1848 name = build_decl (gimple_location (stmt),
1849 TYPE_DECL, name, ctx->record_type);
1850 TYPE_NAME (ctx->record_type) = name;
1851
1852 scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1853 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1854
1855 if (TYPE_FIELDS (ctx->record_type) == NULL)
1856 ctx->record_type = NULL;
1857 else
1858 layout_type (ctx->record_type);
1859 }
1860
1861
1862 /* Check OpenMP nesting restrictions. */
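/* Illustrative sketch, not part of the original sources: one violation
   the checks below diagnose is a worksharing construct closely nested
   inside another worksharing construct, e.g.

       #pragma omp for
       for (i = 0; i < n; i++)
         {
           #pragma omp for
           for (j = 0; j < n; j++)
             f (i, j);
         }

   which triggers the "work-sharing region may not be closely nested
   inside of work-sharing, ... region" error.  Wrapping the inner loop
   in its own #pragma omp parallel makes the nesting valid, which is
   why GIMPLE_OMP_PARALLEL terminates the context walks below.  */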
1863 static bool
1864 check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1865 {
1866 if (ctx != NULL)
1867 {
1868 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
1869 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
1870 {
1871 error_at (gimple_location (stmt),
1872 "OpenMP constructs may not be nested inside simd region");
1873 return false;
1874 }
1875 }
1876 switch (gimple_code (stmt))
1877 {
1878 case GIMPLE_OMP_FOR:
1879 if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_SIMD)
1880 return true;
1881 /* FALLTHRU */
1882 case GIMPLE_OMP_SECTIONS:
1883 case GIMPLE_OMP_SINGLE:
1884 case GIMPLE_CALL:
1885 for (; ctx != NULL; ctx = ctx->outer)
1886 switch (gimple_code (ctx->stmt))
1887 {
1888 case GIMPLE_OMP_FOR:
1889 case GIMPLE_OMP_SECTIONS:
1890 case GIMPLE_OMP_SINGLE:
1891 case GIMPLE_OMP_ORDERED:
1892 case GIMPLE_OMP_MASTER:
1893 case GIMPLE_OMP_TASK:
1894 if (is_gimple_call (stmt))
1895 {
1896 error_at (gimple_location (stmt),
1897 "barrier region may not be closely nested inside "
1898 "of work-sharing, critical, ordered, master or "
1899 "explicit task region");
1900 return false;
1901 }
1902 error_at (gimple_location (stmt),
1903 "work-sharing region may not be closely nested inside "
1904 "of work-sharing, critical, ordered, master or explicit "
1905 "task region");
1906 return false;
1907 case GIMPLE_OMP_PARALLEL:
1908 return true;
1909 default:
1910 break;
1911 }
1912 break;
1913 case GIMPLE_OMP_MASTER:
1914 for (; ctx != NULL; ctx = ctx->outer)
1915 switch (gimple_code (ctx->stmt))
1916 {
1917 case GIMPLE_OMP_FOR:
1918 case GIMPLE_OMP_SECTIONS:
1919 case GIMPLE_OMP_SINGLE:
1920 case GIMPLE_OMP_TASK:
1921 error_at (gimple_location (stmt),
1922 "master region may not be closely nested inside "
1923 "of work-sharing or explicit task region");
1924 return false;
1925 case GIMPLE_OMP_PARALLEL:
1926 return true;
1927 default:
1928 break;
1929 }
1930 break;
1931 case GIMPLE_OMP_ORDERED:
1932 for (; ctx != NULL; ctx = ctx->outer)
1933 switch (gimple_code (ctx->stmt))
1934 {
1935 case GIMPLE_OMP_CRITICAL:
1936 case GIMPLE_OMP_TASK:
1937 error_at (gimple_location (stmt),
1938 "ordered region may not be closely nested inside "
1939 "of critical or explicit task region");
1940 return false;
1941 case GIMPLE_OMP_FOR:
1942 if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1943 OMP_CLAUSE_ORDERED) == NULL)
1944 {
1945 error_at (gimple_location (stmt),
1946 "ordered region must be closely nested inside "
1947 "a loop region with an ordered clause");
1948 return false;
1949 }
1950 return true;
1951 case GIMPLE_OMP_PARALLEL:
1952 return true;
1953 default:
1954 break;
1955 }
1956 break;
1957 case GIMPLE_OMP_CRITICAL:
1958 for (; ctx != NULL; ctx = ctx->outer)
1959 if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1960 && (gimple_omp_critical_name (stmt)
1961 == gimple_omp_critical_name (ctx->stmt)))
1962 {
1963 error_at (gimple_location (stmt),
1964 "critical region may not be nested inside a critical "
1965 "region with the same name");
1966 return false;
1967 }
1968 break;
1969 default:
1970 break;
1971 }
1972 return true;
1973 }
1974
1975
1976 /* Helper function for scan_omp.
1977
1978 Callback for walk_tree or for operands in walk_gimple_stmt, used to
1979 scan for OpenMP directives in TP. */
1980
1981 static tree
1982 scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1983 {
1984 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1985 omp_context *ctx = (omp_context *) wi->info;
1986 tree t = *tp;
1987
1988 switch (TREE_CODE (t))
1989 {
1990 case VAR_DECL:
1991 case PARM_DECL:
1992 case LABEL_DECL:
1993 case RESULT_DECL:
1994 if (ctx)
1995 *tp = remap_decl (t, &ctx->cb);
1996 break;
1997
1998 default:
1999 if (ctx && TYPE_P (t))
2000 *tp = remap_type (t, &ctx->cb);
2001 else if (!DECL_P (t))
2002 {
2003 *walk_subtrees = 1;
2004 if (ctx)
2005 {
2006 tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
2007 if (tem != TREE_TYPE (t))
2008 {
2009 if (TREE_CODE (t) == INTEGER_CST)
2010 *tp = build_int_cst_wide (tem,
2011 TREE_INT_CST_LOW (t),
2012 TREE_INT_CST_HIGH (t));
2013 else
2014 TREE_TYPE (t) = tem;
2015 }
2016 }
2017 }
2018 break;
2019 }
2020
2021 return NULL_TREE;
2022 }
2023
2024
2025 /* Helper function for scan_omp.
2026
2027 Callback for walk_gimple_stmt used to scan for OpenMP directives in
2028 the current statement in GSI. */
2029
2030 static tree
2031 scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
2032 struct walk_stmt_info *wi)
2033 {
2034 gimple stmt = gsi_stmt (*gsi);
2035 omp_context *ctx = (omp_context *) wi->info;
2036
2037 if (gimple_has_location (stmt))
2038 input_location = gimple_location (stmt);
2039
2040 /* Check the OpenMP nesting restrictions. */
2041 if (ctx != NULL)
2042 {
2043 bool remove = false;
2044 if (is_gimple_omp (stmt))
2045 remove = !check_omp_nesting_restrictions (stmt, ctx);
2046 else if (is_gimple_call (stmt))
2047 {
2048 tree fndecl = gimple_call_fndecl (stmt);
2049 if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
2050 && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
2051 remove = !check_omp_nesting_restrictions (stmt, ctx);
2052 }
2053 if (remove)
2054 {
2055 stmt = gimple_build_nop ();
2056 gsi_replace (gsi, stmt, false);
2057 }
2058 }
2059
2060 *handled_ops_p = true;
2061
2062 switch (gimple_code (stmt))
2063 {
2064 case GIMPLE_OMP_PARALLEL:
2065 taskreg_nesting_level++;
2066 scan_omp_parallel (gsi, ctx);
2067 taskreg_nesting_level--;
2068 break;
2069
2070 case GIMPLE_OMP_TASK:
2071 taskreg_nesting_level++;
2072 scan_omp_task (gsi, ctx);
2073 taskreg_nesting_level--;
2074 break;
2075
2076 case GIMPLE_OMP_FOR:
2077 scan_omp_for (stmt, ctx);
2078 break;
2079
2080 case GIMPLE_OMP_SECTIONS:
2081 scan_omp_sections (stmt, ctx);
2082 break;
2083
2084 case GIMPLE_OMP_SINGLE:
2085 scan_omp_single (stmt, ctx);
2086 break;
2087
2088 case GIMPLE_OMP_SECTION:
2089 case GIMPLE_OMP_MASTER:
2090 case GIMPLE_OMP_ORDERED:
2091 case GIMPLE_OMP_CRITICAL:
2092 ctx = new_omp_context (stmt, ctx);
2093 scan_omp (gimple_omp_body_ptr (stmt), ctx);
2094 break;
2095
2096 case GIMPLE_BIND:
2097 {
2098 tree var;
2099
2100 *handled_ops_p = false;
2101 if (ctx)
2102 for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2103 insert_decl_map (&ctx->cb, var, var);
2104 }
2105 break;
2106 default:
2107 *handled_ops_p = false;
2108 break;
2109 }
2110
2111 return NULL_TREE;
2112 }
2113
2114
2115 /* Scan all the statements starting at the current statement. CTX
2116 contains context information about the OpenMP directives and
2117 clauses found during the scan. */
2118
2119 static void
2120 scan_omp (gimple_seq *body_p, omp_context *ctx)
2121 {
2122 location_t saved_location;
2123 struct walk_stmt_info wi;
2124
2125 memset (&wi, 0, sizeof (wi));
2126 wi.info = ctx;
2127 wi.want_locations = true;
2128
2129 saved_location = input_location;
2130 walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
2131 input_location = saved_location;
2132 }
2133 \f
2134 /* Re-gimplification and code generation routines. */
2135
2136 /* Build a call to GOMP_barrier. */
2137
2138 static tree
2139 build_omp_barrier (void)
2140 {
2141 return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
2142 }
2143
2144 /* If a context was created for STMT when it was scanned, return it. */
2145
2146 static omp_context *
2147 maybe_lookup_ctx (gimple stmt)
2148 {
2149 splay_tree_node n;
2150 n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2151 return n ? (omp_context *) n->value : NULL;
2152 }
2153
2154
2155 /* Find the mapping for DECL in CTX or the immediately enclosing
2156 context that has a mapping for DECL.
2157
2158 If CTX is a nested parallel directive, we may have to use the decl
2159 mappings created in CTX's parent context. Suppose that we have the
2160 following parallel nesting (variable UIDs shown for clarity):
2161
2162 iD.1562 = 0;
2163 #omp parallel shared(iD.1562) -> outer parallel
2164 iD.1562 = iD.1562 + 1;
2165
2166 #omp parallel shared (iD.1562) -> inner parallel
2167 iD.1562 = iD.1562 - 1;
2168
2169 Each parallel structure will create a distinct .omp_data_s structure
2170 for copying iD.1562 in/out of the directive:
2171
2172 outer parallel .omp_data_s.1.i -> iD.1562
2173 inner parallel .omp_data_s.2.i -> iD.1562
2174
2175 A shared variable mapping will produce a copy-out operation before
2176 the parallel directive and a copy-in operation after it. So, in
2177 this case we would have:
2178
2179 iD.1562 = 0;
2180 .omp_data_o.1.i = iD.1562;
2181 #omp parallel shared(iD.1562) -> outer parallel
2182 .omp_data_i.1 = &.omp_data_o.1
2183 .omp_data_i.1->i = .omp_data_i.1->i + 1;
2184
2185 .omp_data_o.2.i = iD.1562; -> **
2186 #omp parallel shared(iD.1562) -> inner parallel
2187 .omp_data_i.2 = &.omp_data_o.2
2188 .omp_data_i.2->i = .omp_data_i.2->i - 1;
2189
2190
2191 ** This is a problem. The symbol iD.1562 cannot be referenced
2192 inside the body of the outer parallel region. But since we are
2193 emitting this copy operation while expanding the inner parallel
2194 directive, we need to access the CTX structure of the outer
2195 parallel directive to get the correct mapping:
2196
2197 .omp_data_o.2.i = .omp_data_i.1->i
2198
2199 Since there may be other workshare or parallel directives enclosing
2200 the parallel directive, it may be necessary to walk up the context
2201 parent chain. This is not a problem in general because nested
2202 parallelism happens only rarely. */
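/* Illustrative sketch, not part of the original sources: OpenMP source
   giving rise to the iD.1562 nesting used in the example above is
   roughly

       int i = 0;
       #pragma omp parallel shared(i)
       {
         i = i + 1;
         #pragma omp parallel shared(i)
         i = i - 1;
       }

   The variable name and UID are purely illustrative.  */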
2203
2204 static tree
2205 lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2206 {
2207 tree t;
2208 omp_context *up;
2209
2210 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2211 t = maybe_lookup_decl (decl, up);
2212
2213 gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2214
2215 return t ? t : decl;
2216 }
2217
2218
2219 /* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2220 in outer contexts. */
2221
2222 static tree
2223 maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2224 {
2225 tree t = NULL;
2226 omp_context *up;
2227
2228 for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2229 t = maybe_lookup_decl (decl, up);
2230
2231 return t ? t : decl;
2232 }
2233
2234
2235 /* Construct the initialization value for reduction CLAUSE. */
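/* Illustrative sketch, not part of the original sources: for an int
   reduction variable the identity values built below are, e.g.,

       reduction(+:x)   -> 0        reduction(*:x)   -> 1
       reduction(|:x)   -> 0        reduction(&:x)   -> -1 (all bits set)
       reduction(max:x) -> INT_MIN  reduction(min:x) -> INT_MAX

   For floating-point max/min the result is -Inf/+Inf when the mode
   honors infinities, and the most negative/positive finite value
   otherwise, as handled in the MAX_EXPR/MIN_EXPR cases below.  */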
2236
2237 tree
2238 omp_reduction_init (tree clause, tree type)
2239 {
2240 location_t loc = OMP_CLAUSE_LOCATION (clause);
2241 switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2242 {
2243 case PLUS_EXPR:
2244 case MINUS_EXPR:
2245 case BIT_IOR_EXPR:
2246 case BIT_XOR_EXPR:
2247 case TRUTH_OR_EXPR:
2248 case TRUTH_ORIF_EXPR:
2249 case TRUTH_XOR_EXPR:
2250 case NE_EXPR:
2251 return build_zero_cst (type);
2252
2253 case MULT_EXPR:
2254 case TRUTH_AND_EXPR:
2255 case TRUTH_ANDIF_EXPR:
2256 case EQ_EXPR:
2257 return fold_convert_loc (loc, type, integer_one_node);
2258
2259 case BIT_AND_EXPR:
2260 return fold_convert_loc (loc, type, integer_minus_one_node);
2261
2262 case MAX_EXPR:
2263 if (SCALAR_FLOAT_TYPE_P (type))
2264 {
2265 REAL_VALUE_TYPE max, min;
2266 if (HONOR_INFINITIES (TYPE_MODE (type)))
2267 {
2268 real_inf (&max);
2269 real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2270 }
2271 else
2272 real_maxval (&min, 1, TYPE_MODE (type));
2273 return build_real (type, min);
2274 }
2275 else
2276 {
2277 gcc_assert (INTEGRAL_TYPE_P (type));
2278 return TYPE_MIN_VALUE (type);
2279 }
2280
2281 case MIN_EXPR:
2282 if (SCALAR_FLOAT_TYPE_P (type))
2283 {
2284 REAL_VALUE_TYPE max;
2285 if (HONOR_INFINITIES (TYPE_MODE (type)))
2286 real_inf (&max);
2287 else
2288 real_maxval (&max, 0, TYPE_MODE (type));
2289 return build_real (type, max);
2290 }
2291 else
2292 {
2293 gcc_assert (INTEGRAL_TYPE_P (type));
2294 return TYPE_MAX_VALUE (type);
2295 }
2296
2297 default:
2298 gcc_unreachable ();
2299 }
2300 }
2301
2302 /* Return maximum possible vectorization factor for the target. */
2303
2304 static int
2305 omp_max_vf (void)
2306 {
2307 if (!optimize
2308 || optimize_debug
2309 || (!flag_tree_loop_vectorize
2310 && (global_options_set.x_flag_tree_loop_vectorize
2311 || global_options_set.x_flag_tree_vectorize)))
2312 return 1;
2313
2314 int vs = targetm.vectorize.autovectorize_vector_sizes ();
2315 if (vs)
2316 {
2317 vs = 1 << floor_log2 (vs);
2318 return vs;
2319 }
2320 enum machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
2321 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
2322 return GET_MODE_NUNITS (vqimode);
2323 return 1;
2324 }
2325
2326 /* Helper function of lower_rec_input_clauses, used for #pragma omp simd
2327 privatization. */
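/* Illustrative sketch, not part of the original sources: for a scalar
   D privatized in a simd loop, the code below creates a backing array
   carrying the "omp simd array" attribute, conceptually

       D_vect[max_vf];

   References to D inside the loop body are redirected to D_vect[lane]
   (LVAR) through DECL_VALUE_EXPR, while D_vect[idx] (IVAR) is what the
   callers use in the loops that initialize and finalize every lane.
   D_vect is only an illustrative name.  */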
2328
2329 static bool
2330 lower_rec_simd_input_clauses (tree new_var, omp_context *ctx, int &max_vf,
2331 tree &idx, tree &lane, tree &ivar, tree &lvar)
2332 {
2333 if (max_vf == 0)
2334 {
2335 max_vf = omp_max_vf ();
2336 if (max_vf > 1)
2337 {
2338 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2339 OMP_CLAUSE_SAFELEN);
2340 if (c
2341 && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c), max_vf) == -1)
2342 max_vf = tree_low_cst (OMP_CLAUSE_SAFELEN_EXPR (c), 0);
2343 }
2344 if (max_vf > 1)
2345 {
2346 idx = create_tmp_var (unsigned_type_node, NULL);
2347 lane = create_tmp_var (unsigned_type_node, NULL);
2348 }
2349 }
2350 if (max_vf == 1)
2351 return false;
2352
2353 tree atype = build_array_type_nelts (TREE_TYPE (new_var), max_vf);
2354 tree avar = create_tmp_var_raw (atype, NULL);
2355 if (TREE_ADDRESSABLE (new_var))
2356 TREE_ADDRESSABLE (avar) = 1;
2357 DECL_ATTRIBUTES (avar)
2358 = tree_cons (get_identifier ("omp simd array"), NULL,
2359 DECL_ATTRIBUTES (avar));
2360 gimple_add_tmp_var (avar);
2361 ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, idx,
2362 NULL_TREE, NULL_TREE);
2363 lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, lane,
2364 NULL_TREE, NULL_TREE);
2365 SET_DECL_VALUE_EXPR (new_var, lvar);
2366 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2367 return true;
2368 }
2369
2370 /* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2371 from the receiver (aka child) side and initializers for REFERENCE_TYPE
2372 private variables. Initialization statements go in ILIST, while calls
2373 to destructors go in DLIST. */
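/* Illustrative sketch, not part of the original sources: for

       #pragma omp parallel firstprivate(a) copyin(t)

   the receiver-side ILIST built below contains, roughly,

       a = .omp_data_i->a;
       if (omp_get_thread_num () != 0)
         t = .omp_data_i->t;

   i.e. the firstprivate copy-in followed by the copyin sequence that
   only the non-master threads execute, plus any constructor calls
   produced through the lang hooks (destructor calls go to DLIST).  */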
2374
2375 static void
2376 lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2377 omp_context *ctx)
2378 {
2379 tree c, dtor, copyin_seq, x, ptr;
2380 bool copyin_by_ref = false;
2381 bool lastprivate_firstprivate = false;
2382 int pass;
2383 bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2384 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD);
2385 int max_vf = 0;
2386 tree lane = NULL_TREE, idx = NULL_TREE;
2387 tree ivar = NULL_TREE, lvar = NULL_TREE;
2388 gimple_seq llist[2] = { NULL, NULL };
2389
2390 copyin_seq = NULL;
2391
2392 /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
2393 with data sharing clauses referencing variable sized vars. That
2394 is unnecessarily hard to support and very unlikely to result in
2395 vectorized code anyway. */
2396 if (is_simd)
2397 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2398 switch (OMP_CLAUSE_CODE (c))
2399 {
2400 case OMP_CLAUSE_REDUCTION:
2401 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2402 max_vf = 1;
2403 /* FALLTHRU */
2404 case OMP_CLAUSE_PRIVATE:
2405 case OMP_CLAUSE_FIRSTPRIVATE:
2406 case OMP_CLAUSE_LASTPRIVATE:
2407 case OMP_CLAUSE_LINEAR:
2408 if (is_variable_sized (OMP_CLAUSE_DECL (c)))
2409 max_vf = 1;
2410 break;
2411 default:
2412 continue;
2413 }
2414
2415 /* Do all the fixed sized types in the first pass, and the variable sized
2416 types in the second pass. This makes sure that the scalar arguments to
2417 the variable sized types are processed before we use them in the
2418 variable sized operations. */
2419 for (pass = 0; pass < 2; ++pass)
2420 {
2421 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2422 {
2423 enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2424 tree var, new_var;
2425 bool by_ref;
2426 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2427
2428 switch (c_kind)
2429 {
2430 case OMP_CLAUSE_PRIVATE:
2431 if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2432 continue;
2433 break;
2434 case OMP_CLAUSE_SHARED:
2435 if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2436 {
2437 gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2438 continue;
2439 }
2440 case OMP_CLAUSE_FIRSTPRIVATE:
2441 case OMP_CLAUSE_COPYIN:
2442 case OMP_CLAUSE_REDUCTION:
2443 break;
2444 case OMP_CLAUSE_LINEAR:
2445 break;
2446 case OMP_CLAUSE_LASTPRIVATE:
2447 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2448 {
2449 lastprivate_firstprivate = true;
2450 if (pass != 0)
2451 continue;
2452 }
2453 break;
2454 default:
2455 continue;
2456 }
2457
2458 new_var = var = OMP_CLAUSE_DECL (c);
2459 if (c_kind != OMP_CLAUSE_COPYIN)
2460 new_var = lookup_decl (var, ctx);
2461
2462 if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2463 {
2464 if (pass != 0)
2465 continue;
2466 }
2467 else if (is_variable_sized (var))
2468 {
2469 /* For variable sized types, we need to allocate the
2470 actual storage here. Call alloca and store the
2471 result in the pointer decl that we created elsewhere. */
2472 if (pass == 0)
2473 continue;
2474
2475 if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2476 {
2477 gimple stmt;
2478 tree tmp, atmp;
2479
2480 ptr = DECL_VALUE_EXPR (new_var);
2481 gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2482 ptr = TREE_OPERAND (ptr, 0);
2483 gcc_assert (DECL_P (ptr));
2484 x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2485
2486 /* void *tmp = __builtin_alloca */
2487 atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2488 stmt = gimple_build_call (atmp, 1, x);
2489 tmp = create_tmp_var_raw (ptr_type_node, NULL);
2490 gimple_add_tmp_var (tmp);
2491 gimple_call_set_lhs (stmt, tmp);
2492
2493 gimple_seq_add_stmt (ilist, stmt);
2494
2495 x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2496 gimplify_assign (ptr, x, ilist);
2497 }
2498 }
2499 else if (is_reference (var))
2500 {
2501 /* For references that are being privatized for Fortran,
2502 allocate new backing storage for the new pointer
2503 variable. This allows us to avoid changing all the
2504 code that expects a pointer to something that expects
2505 a direct variable. Note that this doesn't apply to
2506 C++, since reference types are disallowed in data
2507 sharing clauses there, except for NRV optimized
2508 return values. */
2509 if (pass == 0)
2510 continue;
2511
2512 x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2513 if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2514 {
2515 x = build_receiver_ref (var, false, ctx);
2516 x = build_fold_addr_expr_loc (clause_loc, x);
2517 }
2518 else if (TREE_CONSTANT (x))
2519 {
2520 const char *name = NULL;
2521 if (DECL_NAME (var))
2522 name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2523
2524 x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2525 name);
2526 gimple_add_tmp_var (x);
2527 TREE_ADDRESSABLE (x) = 1;
2528 x = build_fold_addr_expr_loc (clause_loc, x);
2529 }
2530 else
2531 {
2532 tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2533 x = build_call_expr_loc (clause_loc, atmp, 1, x);
2534 }
2535
2536 x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2537 gimplify_assign (new_var, x, ilist);
2538
2539 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2540 }
2541 else if (c_kind == OMP_CLAUSE_REDUCTION
2542 && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2543 {
2544 if (pass == 0)
2545 continue;
2546 }
2547 else if (pass != 0)
2548 continue;
2549
2550 switch (OMP_CLAUSE_CODE (c))
2551 {
2552 case OMP_CLAUSE_SHARED:
2553 /* Shared global vars are just accessed directly. */
2554 if (is_global_var (new_var))
2555 break;
2556 /* Set up the DECL_VALUE_EXPR for shared variables now. This
2557 needs to be delayed until after fixup_child_record_type so
2558 that we get the correct type during the dereference. */
2559 by_ref = use_pointer_for_field (var, ctx);
2560 x = build_receiver_ref (var, by_ref, ctx);
2561 SET_DECL_VALUE_EXPR (new_var, x);
2562 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2563
2564 /* ??? If VAR is not passed by reference, and the variable
2565 hasn't been initialized yet, then we'll get a warning for
2566 the store into the omp_data_s structure. Ideally, we'd be
2567 able to notice this and not store anything at all, but
2568 we're generating code too early. Suppress the warning. */
2569 if (!by_ref)
2570 TREE_NO_WARNING (var) = 1;
2571 break;
2572
2573 case OMP_CLAUSE_LASTPRIVATE:
2574 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2575 break;
2576 /* FALLTHRU */
2577
2578 case OMP_CLAUSE_PRIVATE:
2579 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2580 x = build_outer_var_ref (var, ctx);
2581 else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2582 {
2583 if (is_task_ctx (ctx))
2584 x = build_receiver_ref (var, false, ctx);
2585 else
2586 x = build_outer_var_ref (var, ctx);
2587 }
2588 else
2589 x = NULL;
2590 do_private:
2591 x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2592 if (is_simd)
2593 {
2594 tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
2595 if ((TREE_ADDRESSABLE (new_var) || x || y
2596 || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2597 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
2598 idx, lane, ivar, lvar))
2599 {
2600 if (x)
2601 x = lang_hooks.decls.omp_clause_default_ctor
2602 (c, unshare_expr (ivar), x);
2603 if (x)
2604 gimplify_and_add (x, &llist[0]);
2605 if (y)
2606 {
2607 y = lang_hooks.decls.omp_clause_dtor (c, ivar);
2608 if (y)
2609 {
2610 gimple_seq tseq = NULL;
2611
2612 dtor = y;
2613 gimplify_stmt (&dtor, &tseq);
2614 gimple_seq_add_seq (&llist[1], tseq);
2615 }
2616 }
2617 break;
2618 }
2619 }
2620 if (x)
2621 gimplify_and_add (x, ilist);
2622 /* FALLTHRU */
2623
2624 do_dtor:
2625 x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2626 if (x)
2627 {
2628 gimple_seq tseq = NULL;
2629
2630 dtor = x;
2631 gimplify_stmt (&dtor, &tseq);
2632 gimple_seq_add_seq (dlist, tseq);
2633 }
2634 break;
2635
2636 case OMP_CLAUSE_LINEAR:
2637 if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
2638 goto do_firstprivate;
2639 if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
2640 x = NULL;
2641 else
2642 x = build_outer_var_ref (var, ctx);
2643 goto do_private;
2644
2645 case OMP_CLAUSE_FIRSTPRIVATE:
2646 if (is_task_ctx (ctx))
2647 {
2648 if (is_reference (var) || is_variable_sized (var))
2649 goto do_dtor;
2650 else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2651 ctx))
2652 || use_pointer_for_field (var, NULL))
2653 {
2654 x = build_receiver_ref (var, false, ctx);
2655 SET_DECL_VALUE_EXPR (new_var, x);
2656 DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2657 goto do_dtor;
2658 }
2659 }
2660 do_firstprivate:
2661 x = build_outer_var_ref (var, ctx);
2662 if (is_simd)
2663 {
2664 if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
2665 || TREE_ADDRESSABLE (new_var))
2666 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
2667 idx, lane, ivar, lvar))
2668 {
2669 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
2670 {
2671 tree iv = create_tmp_var (TREE_TYPE (new_var), NULL);
2672 x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
2673 gimplify_and_add (x, ilist);
2674 gimple_stmt_iterator gsi
2675 = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
2676 gimple g
2677 = gimple_build_assign (unshare_expr (lvar), iv);
2678 gsi_insert_before_without_update (&gsi, g,
2679 GSI_SAME_STMT);
2680 tree stept = POINTER_TYPE_P (TREE_TYPE (x))
2681 ? sizetype : TREE_TYPE (x);
2682 tree t = fold_convert (stept,
2683 OMP_CLAUSE_LINEAR_STEP (c));
2684 enum tree_code code = PLUS_EXPR;
2685 if (POINTER_TYPE_P (TREE_TYPE (new_var)))
2686 code = POINTER_PLUS_EXPR;
2687 g = gimple_build_assign_with_ops (code, iv, iv, t);
2688 gsi_insert_before_without_update (&gsi, g,
2689 GSI_SAME_STMT);
2690 break;
2691 }
2692 x = lang_hooks.decls.omp_clause_copy_ctor
2693 (c, unshare_expr (ivar), x);
2694 gimplify_and_add (x, &llist[0]);
2695 x = lang_hooks.decls.omp_clause_dtor (c, ivar);
2696 if (x)
2697 {
2698 gimple_seq tseq = NULL;
2699
2700 dtor = x;
2701 gimplify_stmt (&dtor, &tseq);
2702 gimple_seq_add_seq (&llist[1], tseq);
2703 }
2704 break;
2705 }
2706 }
2707 x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2708 gimplify_and_add (x, ilist);
2709 goto do_dtor;
2710
2711 case OMP_CLAUSE_COPYIN:
2712 by_ref = use_pointer_for_field (var, NULL);
2713 x = build_receiver_ref (var, by_ref, ctx);
2714 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2715 append_to_statement_list (x, &copyin_seq);
2716 copyin_by_ref |= by_ref;
2717 break;
2718
2719 case OMP_CLAUSE_REDUCTION:
2720 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2721 {
2722 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2723 x = build_outer_var_ref (var, ctx);
2724
2725 /* FIXME: Not handled yet. */
2726 gcc_assert (!is_simd);
2727 if (is_reference (var))
2728 x = build_fold_addr_expr_loc (clause_loc, x);
2729 SET_DECL_VALUE_EXPR (placeholder, x);
2730 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2731 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2732 gimple_seq_add_seq (ilist,
2733 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2734 OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2735 DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2736 }
2737 else
2738 {
2739 x = omp_reduction_init (c, TREE_TYPE (new_var));
2740 gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2741 if (is_simd
2742 && lower_rec_simd_input_clauses (new_var, ctx, max_vf,
2743 idx, lane, ivar, lvar))
2744 {
2745 enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
2746 tree ref = build_outer_var_ref (var, ctx);
2747
2748 gimplify_assign (unshare_expr (ivar), x, &llist[0]);
2749
2750 /* reduction(-:var) sums up the partial results, so it
2751 acts identically to reduction(+:var). */
2752 if (code == MINUS_EXPR)
2753 code = PLUS_EXPR;
2754
2755 x = build2 (code, TREE_TYPE (ref), ref, ivar);
2756 ref = build_outer_var_ref (var, ctx);
2757 gimplify_assign (ref, x, &llist[1]);
2758 }
2759 else
2760 {
2761 gimplify_assign (new_var, x, ilist);
2762 if (is_simd)
2763 gimplify_assign (build_outer_var_ref (var, ctx),
2764 new_var, dlist);
2765 }
2766 }
2767 break;
2768
2769 default:
2770 gcc_unreachable ();
2771 }
2772 }
2773 }
2774
2775 if (lane)
2776 {
2777 tree uid = create_tmp_var (ptr_type_node, "simduid");
2778 gimple g
2779 = gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 1, uid);
2780 gimple_call_set_lhs (g, lane);
2781 gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
2782 gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
2783 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
2784 OMP_CLAUSE__SIMDUID__DECL (c) = uid;
2785 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
2786 gimple_omp_for_set_clauses (ctx->stmt, c);
2787 g = gimple_build_assign_with_ops (INTEGER_CST, lane,
2788 build_int_cst (unsigned_type_node, 0),
2789 NULL_TREE);
2790 gimple_seq_add_stmt (ilist, g);
2791 for (int i = 0; i < 2; i++)
2792 if (llist[i])
2793 {
2794 tree vf = create_tmp_var (unsigned_type_node, NULL);
2795 g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
2796 gimple_call_set_lhs (g, vf);
2797 gimple_seq *seq = i == 0 ? ilist : dlist;
2798 gimple_seq_add_stmt (seq, g);
2799 tree t = build_int_cst (unsigned_type_node, 0);
2800 g = gimple_build_assign_with_ops (INTEGER_CST, idx, t, NULL_TREE);
2801 gimple_seq_add_stmt (seq, g);
2802 tree body = create_artificial_label (UNKNOWN_LOCATION);
2803 tree header = create_artificial_label (UNKNOWN_LOCATION);
2804 tree end = create_artificial_label (UNKNOWN_LOCATION);
2805 gimple_seq_add_stmt (seq, gimple_build_goto (header));
2806 gimple_seq_add_stmt (seq, gimple_build_label (body));
2807 gimple_seq_add_seq (seq, llist[i]);
2808 t = build_int_cst (unsigned_type_node, 1);
2809 g = gimple_build_assign_with_ops (PLUS_EXPR, idx, idx, t);
2810 gimple_seq_add_stmt (seq, g);
2811 gimple_seq_add_stmt (seq, gimple_build_label (header));
2812 g = gimple_build_cond (LT_EXPR, idx, vf, body, end);
2813 gimple_seq_add_stmt (seq, g);
2814 gimple_seq_add_stmt (seq, gimple_build_label (end));
2815 }
2816 }
2817
2818 /* The copyin sequence is not to be executed by the main thread, since
2819 that would result in self-copies. Self-copying may not be observable
2820 for scalars, but it certainly is for C++ operator=. */
2821 if (copyin_seq)
2822 {
2823 x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2824 0);
2825 x = build2 (NE_EXPR, boolean_type_node, x,
2826 build_int_cst (TREE_TYPE (x), 0));
2827 x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2828 gimplify_and_add (x, ilist);
2829 }
2830
2831 /* If any copyin variable is passed by reference, we must ensure the
2832 master thread doesn't modify it before it is copied over in all
2833 threads. Similarly for variables in both firstprivate and
2834 lastprivate clauses we need to ensure the lastprivate copying
2835 happens after firstprivate copying in all threads. */
2836 if (copyin_by_ref || lastprivate_firstprivate)
2837 {
2838 /* Don't add any barrier for #pragma omp simd or
2839 #pragma omp distribute. */
2840 if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
2841 || gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR)
2842 gimplify_and_add (build_omp_barrier (), ilist);
2843 }
2844
2845 /* If max_vf is non-zero, then we can use only a vectorization factor
2846 up to the max_vf we chose. So stick it into the safelen clause. */
2847 if (max_vf)
2848 {
2849 tree c = find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
2850 OMP_CLAUSE_SAFELEN);
2851 if (c == NULL_TREE
2852 || compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
2853 max_vf) == 1)
2854 {
2855 c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
2856 OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
2857 max_vf);
2858 OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
2859 gimple_omp_for_set_clauses (ctx->stmt, c);
2860 }
2861 }
2862 }
2863
2864
2865 /* Generate code to implement the LASTPRIVATE clauses. This is used for
2866 both parallel and workshare constructs. PREDICATE may be NULL if it's
2867 always true. */
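/* Illustrative sketch, not part of the original sources: for
   lastprivate(x) the sequence appended below is, roughly,

       if (D.cond)
         x = x_priv;

   where D.cond is PREDICATE (omitted when it is always true) testing
   whether this thread executed the sequentially last iteration, and
   the assignment is built via omp_clause_assign_op.  For simd loops,
   IFN_GOMP_SIMD_LAST_LANE picks the lane of the "omp simd array" that
   the copy-back reads from.  */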
2868
2869 static void
2870 lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2871 omp_context *ctx)
2872 {
2873 tree x, c, label = NULL, orig_clauses = clauses;
2874 bool par_clauses = false;
2875 tree simduid = NULL, lastlane = NULL;
2876
2877 /* Early exit if there are no lastprivate or linear clauses. */
2878 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
2879 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
2880 || (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
2881 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
2882 break;
2883 if (clauses == NULL)
2884 {
2885 /* If this was a workshare clause, see if it had been combined
2886 with its parallel. In that case, look for the clauses on the
2887 parallel statement itself. */
2888 if (is_parallel_ctx (ctx))
2889 return;
2890
2891 ctx = ctx->outer;
2892 if (ctx == NULL || !is_parallel_ctx (ctx))
2893 return;
2894
2895 clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2896 OMP_CLAUSE_LASTPRIVATE);
2897 if (clauses == NULL)
2898 return;
2899 par_clauses = true;
2900 }
2901
2902 if (predicate)
2903 {
2904 gimple stmt;
2905 tree label_true, arm1, arm2;
2906
2907 label = create_artificial_label (UNKNOWN_LOCATION);
2908 label_true = create_artificial_label (UNKNOWN_LOCATION);
2909 arm1 = TREE_OPERAND (predicate, 0);
2910 arm2 = TREE_OPERAND (predicate, 1);
2911 gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2912 gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2913 stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2914 label_true, label);
2915 gimple_seq_add_stmt (stmt_list, stmt);
2916 gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2917 }
2918
2919 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
2920 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
2921 {
2922 simduid = find_omp_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
2923 if (simduid)
2924 simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
2925 }
2926
2927 for (c = clauses; c ;)
2928 {
2929 tree var, new_var;
2930 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2931
2932 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
2933 || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2934 && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
2935 {
2936 var = OMP_CLAUSE_DECL (c);
2937 new_var = lookup_decl (var, ctx);
2938
2939 if (simduid && DECL_HAS_VALUE_EXPR_P (new_var))
2940 {
2941 tree val = DECL_VALUE_EXPR (new_var);
2942 if (TREE_CODE (val) == ARRAY_REF
2943 && VAR_P (TREE_OPERAND (val, 0))
2944 && lookup_attribute ("omp simd array",
2945 DECL_ATTRIBUTES (TREE_OPERAND (val,
2946 0))))
2947 {
2948 if (lastlane == NULL)
2949 {
2950 lastlane = create_tmp_var (unsigned_type_node, NULL);
2951 gimple g
2952 = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
2953 2, simduid,
2954 TREE_OPERAND (val, 1));
2955 gimple_call_set_lhs (g, lastlane);
2956 gimple_seq_add_stmt (stmt_list, g);
2957 }
2958 new_var = build4 (ARRAY_REF, TREE_TYPE (val),
2959 TREE_OPERAND (val, 0), lastlane,
2960 NULL_TREE, NULL_TREE);
2961 }
2962 }
2963
2964 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
2965 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2966 {
2967 lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2968 gimple_seq_add_seq (stmt_list,
2969 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2970 OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2971 }
2972
2973 x = build_outer_var_ref (var, ctx);
2974 if (is_reference (var))
2975 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2976 x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2977 gimplify_and_add (x, stmt_list);
2978 }
2979 c = OMP_CLAUSE_CHAIN (c);
2980 if (c == NULL && !par_clauses)
2981 {
2982 /* If this was a workshare clause, see if it had been combined
2983 with its parallel. In that case, continue looking for the
2984 clauses also on the parallel statement itself. */
2985 if (is_parallel_ctx (ctx))
2986 break;
2987
2988 ctx = ctx->outer;
2989 if (ctx == NULL || !is_parallel_ctx (ctx))
2990 break;
2991
2992 c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2993 OMP_CLAUSE_LASTPRIVATE);
2994 par_clauses = true;
2995 }
2996 }
2997
2998 if (label)
2999 gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
3000 }
3001
3002
3003 /* Generate code to implement the REDUCTION clauses. */
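/* Illustrative sketch, not part of the original sources: with a single
   scalar reduction(+:s) clause, the merge emitted below is one atomic
   update, conceptually

       #pragma omp atomic
       s = s + s_priv;

   With two or more reduction clauses, or an array/placeholder
   reduction, the merges are instead bracketed by calls to
   GOMP_atomic_start () and GOMP_atomic_end ().  */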
3004
3005 static void
3006 lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
3007 {
3008 gimple_seq sub_seq = NULL;
3009 gimple stmt;
3010 tree x, c;
3011 int count = 0;
3012
3013 /* SIMD reductions are handled in lower_rec_input_clauses. */
3014 if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
3015 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
3016 return;
3017
3018 /* First see if there is exactly one reduction clause. Use OMP_ATOMIC
3019 update in that case, otherwise use a lock. */
3020 for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
3021 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
3022 {
3023 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3024 {
3025 /* Never use OMP_ATOMIC for array reductions. */
3026 count = -1;
3027 break;
3028 }
3029 count++;
3030 }
3031
3032 if (count == 0)
3033 return;
3034
3035 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3036 {
3037 tree var, ref, new_var;
3038 enum tree_code code;
3039 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3040
3041 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
3042 continue;
3043
3044 var = OMP_CLAUSE_DECL (c);
3045 new_var = lookup_decl (var, ctx);
3046 if (is_reference (var))
3047 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3048 ref = build_outer_var_ref (var, ctx);
3049 code = OMP_CLAUSE_REDUCTION_CODE (c);
3050
3051 /* reduction(-:var) sums up the partial results, so it acts
3052 identically to reduction(+:var). */
3053 if (code == MINUS_EXPR)
3054 code = PLUS_EXPR;
3055
3056 if (count == 1)
3057 {
3058 tree addr = build_fold_addr_expr_loc (clause_loc, ref);
3059
3060 addr = save_expr (addr);
3061 ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
3062 x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
3063 x = build2 (OMP_ATOMIC, void_type_node, addr, x);
3064 gimplify_and_add (x, stmt_seqp);
3065 return;
3066 }
3067
3068 if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
3069 {
3070 tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
3071
3072 if (is_reference (var))
3073 ref = build_fold_addr_expr_loc (clause_loc, ref);
3074 SET_DECL_VALUE_EXPR (placeholder, ref);
3075 DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
3076 lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
3077 gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
3078 OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
3079 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
3080 }
3081 else
3082 {
3083 x = build2 (code, TREE_TYPE (ref), ref, new_var);
3084 ref = build_outer_var_ref (var, ctx);
3085 gimplify_assign (ref, x, &sub_seq);
3086 }
3087 }
3088
3089 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
3090 0);
3091 gimple_seq_add_stmt (stmt_seqp, stmt);
3092
3093 gimple_seq_add_seq (stmt_seqp, sub_seq);
3094
3095 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
3096 0);
3097 gimple_seq_add_stmt (stmt_seqp, stmt);
3098 }
3099
3100
3101 /* Generate code to implement the COPYPRIVATE clauses. */
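/* Illustrative sketch, not part of the original sources: for

       #pragma omp single copyprivate(x)

   the sender side (SLIST), run by the thread that executed the single
   block, stores x (or its address, when passed by reference) into the
   copy structure, and the receiver side (RLIST), run by the remaining
   threads, assigns that value back to their own x.  */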
3102
3103 static void
3104 lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
3105 omp_context *ctx)
3106 {
3107 tree c;
3108
3109 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3110 {
3111 tree var, new_var, ref, x;
3112 bool by_ref;
3113 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3114
3115 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
3116 continue;
3117
3118 var = OMP_CLAUSE_DECL (c);
3119 by_ref = use_pointer_for_field (var, NULL);
3120
3121 ref = build_sender_ref (var, ctx);
3122 x = new_var = lookup_decl_in_outer_ctx (var, ctx);
3123 if (by_ref)
3124 {
3125 x = build_fold_addr_expr_loc (clause_loc, new_var);
3126 x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
3127 }
3128 gimplify_assign (ref, x, slist);
3129
3130 ref = build_receiver_ref (var, false, ctx);
3131 if (by_ref)
3132 {
3133 ref = fold_convert_loc (clause_loc,
3134 build_pointer_type (TREE_TYPE (new_var)),
3135 ref);
3136 ref = build_fold_indirect_ref_loc (clause_loc, ref);
3137 }
3138 if (is_reference (var))
3139 {
3140 ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
3141 ref = build_simple_mem_ref_loc (clause_loc, ref);
3142 new_var = build_simple_mem_ref_loc (clause_loc, new_var);
3143 }
3144 x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
3145 gimplify_and_add (x, rlist);
3146 }
3147 }
3148
3149
3150 /* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
3151 and REDUCTION from the sender (aka parent) side. */
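/* Illustrative sketch, not part of the original sources: on the sender
   side, firstprivate(a) together with a scalar reduction(+:s) yields
   roughly

       .omp_data_o.a = a;        (ILIST, before the region is entered)
       .omp_data_o.s = s;
       ...
       s = .omp_data_o.s;        (OLIST, after the region completes)

   i.e. "in" copies fill the communication structure and "out" copies
   read results back from it.  */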
3152
3153 static void
3154 lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
3155 omp_context *ctx)
3156 {
3157 tree c;
3158
3159 for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
3160 {
3161 tree val, ref, x, var;
3162 bool by_ref, do_in = false, do_out = false;
3163 location_t clause_loc = OMP_CLAUSE_LOCATION (c);
3164
3165 switch (OMP_CLAUSE_CODE (c))
3166 {
3167 case OMP_CLAUSE_PRIVATE:
3168 if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
3169 break;
3170 continue;
3171 case OMP_CLAUSE_FIRSTPRIVATE:
3172 case OMP_CLAUSE_COPYIN:
3173 case OMP_CLAUSE_LASTPRIVATE:
3174 case OMP_CLAUSE_REDUCTION:
3175 break;
3176 default:
3177 continue;
3178 }
3179
3180 val = OMP_CLAUSE_DECL (c);
3181 var = lookup_decl_in_outer_ctx (val, ctx);
3182
3183 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
3184 && is_global_var (var))
3185 continue;
3186 if (is_variable_sized (val))
3187 continue;
3188 by_ref = use_pointer_for_field (val, NULL);
3189
3190 switch (OMP_CLAUSE_CODE (c))
3191 {
3192 case OMP_CLAUSE_PRIVATE:
3193 case OMP_CLAUSE_FIRSTPRIVATE:
3194 case OMP_CLAUSE_COPYIN:
3195 do_in = true;
3196 break;
3197
3198 case OMP_CLAUSE_LASTPRIVATE:
3199 if (by_ref || is_reference (val))
3200 {
3201 if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
3202 continue;
3203 do_in = true;
3204 }
3205 else
3206 {
3207 do_out = true;
3208 if (lang_hooks.decls.omp_private_outer_ref (val))
3209 do_in = true;
3210 }
3211 break;
3212
3213 case OMP_CLAUSE_REDUCTION:
3214 do_in = true;
3215 do_out = !(by_ref || is_reference (val));
3216 break;
3217
3218 default:
3219 gcc_unreachable ();
3220 }
3221
3222 if (do_in)
3223 {
3224 ref = build_sender_ref (val, ctx);
3225 x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
3226 gimplify_assign (ref, x, ilist);
3227 if (is_task_ctx (ctx))
3228 DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
3229 }
3230
3231 if (do_out)
3232 {
3233 ref = build_sender_ref (val, ctx);
3234 gimplify_assign (var, ref, olist);
3235 }
3236 }
3237 }
3238
3239 /* Generate code to implement SHARED from the sender (aka parent)
3240 side. This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
3241 list things that got automatically shared. */
3242
3243 static void
3244 lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
3245 {
3246 tree var, ovar, nvar, f, x, record_type;
3247
3248 if (ctx->record_type == NULL)
3249 return;
3250
3251 record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
3252 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
3253 {
3254 ovar = DECL_ABSTRACT_ORIGIN (f);
3255 nvar = maybe_lookup_decl (ovar, ctx);
3256 if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
3257 continue;
3258
3259 /* If CTX is a nested parallel directive, find the immediately
3260 enclosing parallel or workshare construct that contains a
3261 mapping for OVAR. */
3262 var = lookup_decl_in_outer_ctx (ovar, ctx);
3263
3264 if (use_pointer_for_field (ovar, ctx))
3265 {
3266 x = build_sender_ref (ovar, ctx);
3267 var = build_fold_addr_expr (var);
3268 gimplify_assign (x, var, ilist);
3269 }
3270 else
3271 {
3272 x = build_sender_ref (ovar, ctx);
3273 gimplify_assign (x, var, ilist);
3274
3275 if (!TREE_READONLY (var)
3276 /* We don't need to receive a new reference to a result
3277 or parm decl. In fact we may not store to it as we will
3278 invalidate any pending RSO and generate wrong gimple
3279 during inlining. */
3280 && !((TREE_CODE (var) == RESULT_DECL
3281 || TREE_CODE (var) == PARM_DECL)
3282 && DECL_BY_REFERENCE (var)))
3283 {
3284 x = build_sender_ref (ovar, ctx);
3285 gimplify_assign (var, x, olist);
3286 }
3287 }
3288 }
3289 }
3290
3291
3292 /* A convenience function to build an empty GIMPLE_COND with just the
3293 condition. */
3294
3295 static gimple
3296 gimple_build_cond_empty (tree cond)
3297 {
3298 enum tree_code pred_code;
3299 tree lhs, rhs;
3300
3301 gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
3302 return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
3303 }
3304
3305
3306 /* Build the function calls to GOMP_parallel_start etc to actually
3307 generate the parallel operation. REGION is the parallel region
3308 being expanded. BB is the block where to insert the code. WS_ARGS
3309 will be set if this is a call to a combined parallel+workshare
3310 construct, it contains the list of additional arguments needed by
3311 the workshare construct. */
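/* Illustrative sketch, not part of the original sources: for a plain
   (non-combined) parallel region the calls emitted below amount to

       GOMP_parallel_start (child_fn, &.omp_data_o, num_threads);
       child_fn (&.omp_data_o);
       GOMP_parallel_end ();

   For a combined parallel+workshare construct the start call becomes
   one of the GOMP_parallel_loop_*_start or GOMP_parallel_sections_start
   entry points, with WS_ARGS appended to its argument list.  */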
3312
3313 static void
3314 expand_parallel_call (struct omp_region *region, basic_block bb,
3315 gimple entry_stmt, vec<tree, va_gc> *ws_args)
3316 {
3317 tree t, t1, t2, val, cond, c, clauses;
3318 gimple_stmt_iterator gsi;
3319 gimple stmt;
3320 enum built_in_function start_ix;
3321 int start_ix2;
3322 location_t clause_loc;
3323 vec<tree, va_gc> *args;
3324
3325 clauses = gimple_omp_parallel_clauses (entry_stmt);
3326
3327 /* Determine what flavor of GOMP_parallel_start we will be
3328 emitting. */
3329 start_ix = BUILT_IN_GOMP_PARALLEL_START;
3330 if (is_combined_parallel (region))
3331 {
3332 switch (region->inner->type)
3333 {
3334 case GIMPLE_OMP_FOR:
3335 gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
3336 start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
3337 + (region->inner->sched_kind
3338 == OMP_CLAUSE_SCHEDULE_RUNTIME
3339 ? 3 : region->inner->sched_kind));
3340 start_ix = (enum built_in_function)start_ix2;
3341 break;
3342 case GIMPLE_OMP_SECTIONS:
3343 start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
3344 break;
3345 default:
3346 gcc_unreachable ();
3347 }
3348 }
3349
3350 /* By default, the value of NUM_THREADS is zero (selected at run time)
3351 and there is no conditional. */
3352 cond = NULL_TREE;
3353 val = build_int_cst (unsigned_type_node, 0);
3354
3355 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3356 if (c)
3357 cond = OMP_CLAUSE_IF_EXPR (c);
3358
3359 c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
3360 if (c)
3361 {
3362 val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
3363 clause_loc = OMP_CLAUSE_LOCATION (c);
3364 }
3365 else
3366 clause_loc = gimple_location (entry_stmt);
3367
3368 /* Ensure 'val' is of the correct type. */
3369 val = fold_convert_loc (clause_loc, unsigned_type_node, val);
3370
3371 /* If we found the clause 'if (cond)', build either
3372 (cond != 0) or (cond ? val : 1u). */
3373 if (cond)
3374 {
3375 gimple_stmt_iterator gsi;
3376
3377 cond = gimple_boolify (cond);
3378
3379 if (integer_zerop (val))
3380 val = fold_build2_loc (clause_loc,
3381 EQ_EXPR, unsigned_type_node, cond,
3382 build_int_cst (TREE_TYPE (cond), 0));
3383 else
3384 {
3385 basic_block cond_bb, then_bb, else_bb;
3386 edge e, e_then, e_else;
3387 tree tmp_then, tmp_else, tmp_join, tmp_var;
3388
3389 tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
3390 if (gimple_in_ssa_p (cfun))
3391 {
3392 tmp_then = make_ssa_name (tmp_var, NULL);
3393 tmp_else = make_ssa_name (tmp_var, NULL);
3394 tmp_join = make_ssa_name (tmp_var, NULL);
3395 }
3396 else
3397 {
3398 tmp_then = tmp_var;
3399 tmp_else = tmp_var;
3400 tmp_join = tmp_var;
3401 }
3402
3403 e = split_block (bb, NULL);
3404 cond_bb = e->src;
3405 bb = e->dest;
3406 remove_edge (e);
3407
3408 then_bb = create_empty_bb (cond_bb);
3409 else_bb = create_empty_bb (then_bb);
3410 set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3411 set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3412
3413 stmt = gimple_build_cond_empty (cond);
3414 gsi = gsi_start_bb (cond_bb);
3415 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3416
3417 gsi = gsi_start_bb (then_bb);
3418 stmt = gimple_build_assign (tmp_then, val);
3419 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3420
3421 gsi = gsi_start_bb (else_bb);
3422 stmt = gimple_build_assign
3423 (tmp_else, build_int_cst (unsigned_type_node, 1));
3424 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3425
3426 make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3427 make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3428 if (current_loops)
3429 {
3430 add_bb_to_loop (then_bb, cond_bb->loop_father);
3431 add_bb_to_loop (else_bb, cond_bb->loop_father);
3432 }
3433 e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3434 e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3435
3436 if (gimple_in_ssa_p (cfun))
3437 {
3438 gimple phi = create_phi_node (tmp_join, bb);
3439 add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3440 add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3441 }
3442
3443 val = tmp_join;
3444 }
3445
3446 gsi = gsi_start_bb (bb);
3447 val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3448 false, GSI_CONTINUE_LINKING);
3449 }
3450
3451 gsi = gsi_last_bb (bb);
3452 t = gimple_omp_parallel_data_arg (entry_stmt);
3453 if (t == NULL)
3454 t1 = null_pointer_node;
3455 else
3456 t1 = build_fold_addr_expr (t);
3457 t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3458
3459 vec_alloc (args, 3 + vec_safe_length (ws_args));
3460 args->quick_push (t2);
3461 args->quick_push (t1);
3462 args->quick_push (val);
3463 if (ws_args)
3464 args->splice (*ws_args);
3465
3466 t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3467 builtin_decl_explicit (start_ix), args);
3468
3469 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3470 false, GSI_CONTINUE_LINKING);
3471
3472 t = gimple_omp_parallel_data_arg (entry_stmt);
3473 if (t == NULL)
3474 t = null_pointer_node;
3475 else
3476 t = build_fold_addr_expr (t);
3477 t = build_call_expr_loc (gimple_location (entry_stmt),
3478 gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3479 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3480 false, GSI_CONTINUE_LINKING);
3481
3482 t = build_call_expr_loc (gimple_location (entry_stmt),
3483 builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3484 0);
3485 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3486 false, GSI_CONTINUE_LINKING);
3487 }
3488
3489
3490 /* Build the function call to GOMP_task to actually
3491 generate the task operation. BB is the block where to insert the code. */
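/* Illustrative sketch, not part of the original sources: the call
   built below has the shape

       GOMP_task (child_fn, &.omp_data_o, cpyfn, arg_size, arg_align,
                  if_cond, flags);

   where CPYFN is the task firstprivate copy function (or NULL) and
   FLAGS encodes untied (1), final (2) and mergeable (4).  */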
3492
3493 static void
3494 expand_task_call (basic_block bb, gimple entry_stmt)
3495 {
3496 tree t, t1, t2, t3, flags, cond, c, c2, clauses;
3497 gimple_stmt_iterator gsi;
3498 location_t loc = gimple_location (entry_stmt);
3499
3500 clauses = gimple_omp_task_clauses (entry_stmt);
3501
3502 c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3503 if (c)
3504 cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3505 else
3506 cond = boolean_true_node;
3507
3508 c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3509 c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3510 flags = build_int_cst (unsigned_type_node,
3511 (c ? 1 : 0) + (c2 ? 4 : 0));
3512
3513 c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3514 if (c)
3515 {
3516 c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3517 c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3518 build_int_cst (unsigned_type_node, 2),
3519 build_int_cst (unsigned_type_node, 0));
3520 flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3521 }
3522
3523 gsi = gsi_last_bb (bb);
3524 t = gimple_omp_task_data_arg (entry_stmt);
3525 if (t == NULL)
3526 t2 = null_pointer_node;
3527 else
3528 t2 = build_fold_addr_expr_loc (loc, t);
3529 t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3530 t = gimple_omp_task_copy_fn (entry_stmt);
3531 if (t == NULL)
3532 t3 = null_pointer_node;
3533 else
3534 t3 = build_fold_addr_expr_loc (loc, t);
3535
3536 t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3537 7, t1, t2, t3,
3538 gimple_omp_task_arg_size (entry_stmt),
3539 gimple_omp_task_arg_align (entry_stmt), cond, flags);
3540
3541 force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3542 false, GSI_CONTINUE_LINKING);
3543 }
3544
3545
3546 /* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3547 catch handler and return it. This prevents programs from violating the
3548 structured block semantics with throws. */
3549
3550 static gimple_seq
3551 maybe_catch_exception (gimple_seq body)
3552 {
3553 gimple g;
3554 tree decl;
3555
3556 if (!flag_exceptions)
3557 return body;
3558
3559 if (lang_hooks.eh_protect_cleanup_actions != NULL)
3560 decl = lang_hooks.eh_protect_cleanup_actions ();
3561 else
3562 decl = builtin_decl_explicit (BUILT_IN_TRAP);
3563
3564 g = gimple_build_eh_must_not_throw (decl);
3565 g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3566 GIMPLE_TRY_CATCH);
3567
3568 return gimple_seq_alloc_with_stmt (g);
3569 }
3570
3571 /* Chain all the DECLs in V by their DECL_CHAIN fields. */
3572
3573 static tree
3574 vec2chain (vec<tree, va_gc> *v)
3575 {
3576 tree chain = NULL_TREE, t;
3577 unsigned ix;
3578
3579 FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
3580 {
3581 DECL_CHAIN (t) = chain;
3582 chain = t;
3583 }
3584
3585 return chain;
3586 }
3587
3588
3589 /* Remove barriers in REGION->EXIT's block. Note that this is only
3590 valid for GIMPLE_OMP_PARALLEL regions. Since the end of a parallel region
3591 is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3592 left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
3593 removed. */
3594
3595 static void
3596 remove_exit_barrier (struct omp_region *region)
3597 {
3598 gimple_stmt_iterator gsi;
3599 basic_block exit_bb;
3600 edge_iterator ei;
3601 edge e;
3602 gimple stmt;
3603 int any_addressable_vars = -1;
3604
3605 exit_bb = region->exit;
3606
3607 /* If the parallel region doesn't return, we don't have REGION->EXIT
3608 block at all. */
3609 if (! exit_bb)
3610 return;
3611
3612 /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN. The
3613 workshare's GIMPLE_OMP_RETURN will be in a preceding block. The kinds of
3614 statements that can appear in between are extremely limited -- no
3615 memory operations at all. Here, we allow nothing at all, so the
3616 only thing we allow to precede this GIMPLE_OMP_RETURN is a label. */
3617 gsi = gsi_last_bb (exit_bb);
3618 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3619 gsi_prev (&gsi);
3620 if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3621 return;
3622
3623 FOR_EACH_EDGE (e, ei, exit_bb->preds)
3624 {
3625 gsi = gsi_last_bb (e->src);
3626 if (gsi_end_p (gsi))
3627 continue;
3628 stmt = gsi_stmt (gsi);
3629 if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3630 && !gimple_omp_return_nowait_p (stmt))
3631 {
3632 /* OpenMP 3.0 tasks unfortunately prevent this optimization
3633 in many cases. If there could be tasks queued, the barrier
3634 might be needed to let the tasks run before some local
3635 variable of the parallel that the task uses as shared
3636 runs out of scope. The task can be spawned either
3637 from within current function (this would be easy to check)
3638 or from some function it calls and gets passed an address
3639 of such a variable. */
3640 if (any_addressable_vars < 0)
3641 {
3642 gimple parallel_stmt = last_stmt (region->entry);
3643 tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3644 tree local_decls, block, decl;
3645 unsigned ix;
3646
3647 any_addressable_vars = 0;
3648 FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3649 if (TREE_ADDRESSABLE (decl))
3650 {
3651 any_addressable_vars = 1;
3652 break;
3653 }
3654 for (block = gimple_block (stmt);
3655 !any_addressable_vars
3656 && block
3657 && TREE_CODE (block) == BLOCK;
3658 block = BLOCK_SUPERCONTEXT (block))
3659 {
3660 for (local_decls = BLOCK_VARS (block);
3661 local_decls;
3662 local_decls = DECL_CHAIN (local_decls))
3663 if (TREE_ADDRESSABLE (local_decls))
3664 {
3665 any_addressable_vars = 1;
3666 break;
3667 }
3668 if (block == gimple_block (parallel_stmt))
3669 break;
3670 }
3671 }
3672 if (!any_addressable_vars)
3673 gimple_omp_return_set_nowait (stmt);
3674 }
3675 }
3676 }
3677
3678 static void
3679 remove_exit_barriers (struct omp_region *region)
3680 {
3681 if (region->type == GIMPLE_OMP_PARALLEL)
3682 remove_exit_barrier (region);
3683
3684 if (region->inner)
3685 {
3686 region = region->inner;
3687 remove_exit_barriers (region);
3688 while (region->next)
3689 {
3690 region = region->next;
3691 remove_exit_barriers (region);
3692 }
3693 }
3694 }
3695
3696 /* Optimize omp_get_thread_num () and omp_get_num_threads ()
3697 calls. These can't be declared as const functions, but
3698 within one parallel body they are constant, so they can be
3699 transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3700 which are declared const. Similarly for task body, except
3701 that in untied task omp_get_thread_num () can change at any task
3702 scheduling point. */
3703
3704 static void
3705 optimize_omp_library_calls (gimple entry_stmt)
3706 {
3707 basic_block bb;
3708 gimple_stmt_iterator gsi;
3709 tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3710 tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3711 tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3712 tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
3713 bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3714 && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3715 OMP_CLAUSE_UNTIED) != NULL);
3716
3717 FOR_EACH_BB (bb)
3718 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3719 {
3720 gimple call = gsi_stmt (gsi);
3721 tree decl;
3722
3723 if (is_gimple_call (call)
3724 && (decl = gimple_call_fndecl (call))
3725 && DECL_EXTERNAL (decl)
3726 && TREE_PUBLIC (decl)
3727 && DECL_INITIAL (decl) == NULL)
3728 {
3729 tree built_in;
3730
3731 if (DECL_NAME (decl) == thr_num_id)
3732 {
3733 /* In #pragma omp task untied omp_get_thread_num () can change
3734 during the execution of the task region. */
3735 if (untied_task)
3736 continue;
3737 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3738 }
3739 else if (DECL_NAME (decl) == num_thr_id)
3740 built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3741 else
3742 continue;
3743
3744 if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3745 || gimple_call_num_args (call) != 0)
3746 continue;
3747
3748 if (flag_exceptions && !TREE_NOTHROW (decl))
3749 continue;
3750
3751 if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3752 || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3753 TREE_TYPE (TREE_TYPE (built_in))))
3754 continue;
3755
3756 gimple_call_set_fndecl (call, built_in);
3757 }
3758 }
3759 }
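/* For example (illustrative only; a, f and n are placeholders), within
a parallel body such as

#pragma omp parallel
for (i = omp_get_thread_num (); i < n; i += omp_get_num_threads ())
a[i] = f (i);

both library calls are redirected to the const builtins, so later
optimizations may hoist them out of the loop instead of re-querying
the runtime on every iteration. */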
3760
3761 /* Callback for expand_omp_build_assign. Return non-NULL if *tp needs to be
3762 regimplified. */
3763
3764 static tree
3765 expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
3766 {
3767 tree t = *tp;
3768
3769 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
3770 if (TREE_CODE (t) == VAR_DECL && DECL_HAS_VALUE_EXPR_P (t))
3771 return t;
3772
3773 if (TREE_CODE (t) == ADDR_EXPR)
3774 recompute_tree_invariant_for_addr_expr (t);
3775
3776 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
3777 return NULL_TREE;
3778 }
3779
3780 /* Prepend TO = FROM assignment before *GSI_P. */
3781
3782 static void
3783 expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from)
3784 {
3785 bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
3786 from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
3787 true, GSI_SAME_STMT);
3788 gimple stmt = gimple_build_assign (to, from);
3789 gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
3790 if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
3791 || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
3792 {
3793 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
3794 gimple_regimplify_operands (stmt, &gsi);
3795 }
3796 }
3797
3798 /* Expand the OpenMP parallel or task directive starting at REGION. */
3799
3800 static void
3801 expand_omp_taskreg (struct omp_region *region)
3802 {
3803 basic_block entry_bb, exit_bb, new_bb;
3804 struct function *child_cfun;
3805 tree child_fn, block, t;
3806 gimple_stmt_iterator gsi;
3807 gimple entry_stmt, stmt;
3808 edge e;
3809 vec<tree, va_gc> *ws_args;
3810
3811 entry_stmt = last_stmt (region->entry);
3812 child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3813 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3814
3815 entry_bb = region->entry;
3816 exit_bb = region->exit;
3817
3818 if (is_combined_parallel (region))
3819 ws_args = region->ws_args;
3820 else
3821 ws_args = NULL;
3822
3823 if (child_cfun->cfg)
3824 {
3825 /* Due to inlining, it may happen that we have already outlined
3826 the region, in which case all we need to do is make the
3827 sub-graph unreachable and emit the parallel call. */
3828 edge entry_succ_e, exit_succ_e;
3829 gimple_stmt_iterator gsi;
3830
3831 entry_succ_e = single_succ_edge (entry_bb);
3832
3833 gsi = gsi_last_bb (entry_bb);
3834 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3835 || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3836 gsi_remove (&gsi, true);
3837
3838 new_bb = entry_bb;
3839 if (exit_bb)
3840 {
3841 exit_succ_e = single_succ_edge (exit_bb);
3842 make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3843 }
3844 remove_edge_and_dominated_blocks (entry_succ_e);
3845 }
3846 else
3847 {
3848 unsigned srcidx, dstidx, num;
3849
3850 /* If the parallel region needs data sent from the parent
3851 function, then the very first statement (except for possible
3852 tree profile counter updates) of the parallel body
3853 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O. Since
3854 &.OMP_DATA_O is passed as an argument to the child function,
3855 we need to replace it with the argument as seen by the child
3856 function.
3857
3858 In most cases, this will end up being the identity assignment
3859 .OMP_DATA_I = .OMP_DATA_I. However, if the parallel body had
3860 a function call that has been inlined, the original PARM_DECL
3861 .OMP_DATA_I may have been converted into a different local
3862 variable, in which case we need to keep the assignment. */
3863 if (gimple_omp_taskreg_data_arg (entry_stmt))
3864 {
3865 basic_block entry_succ_bb = single_succ (entry_bb);
3866 gimple_stmt_iterator gsi;
3867 tree arg, narg;
3868 gimple parcopy_stmt = NULL;
3869
3870 for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3871 {
3872 gimple stmt;
3873
3874 gcc_assert (!gsi_end_p (gsi));
3875 stmt = gsi_stmt (gsi);
3876 if (gimple_code (stmt) != GIMPLE_ASSIGN)
3877 continue;
3878
3879 if (gimple_num_ops (stmt) == 2)
3880 {
3881 tree arg = gimple_assign_rhs1 (stmt);
3882
3883 /* We're ignoring the subcode because we're
3884 effectively doing a STRIP_NOPS. */
3885
3886 if (TREE_CODE (arg) == ADDR_EXPR
3887 && TREE_OPERAND (arg, 0)
3888 == gimple_omp_taskreg_data_arg (entry_stmt))
3889 {
3890 parcopy_stmt = stmt;
3891 break;
3892 }
3893 }
3894 }
3895
3896 gcc_assert (parcopy_stmt != NULL);
3897 arg = DECL_ARGUMENTS (child_fn);
3898
3899 if (!gimple_in_ssa_p (cfun))
3900 {
3901 if (gimple_assign_lhs (parcopy_stmt) == arg)
3902 gsi_remove (&gsi, true);
3903 else
3904 {
3905 /* ?? Is setting the subcode really necessary ?? */
3906 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3907 gimple_assign_set_rhs1 (parcopy_stmt, arg);
3908 }
3909 }
3910 else
3911 {
3912 /* If we are in ssa form, we must load the value from the default
3913 definition of the argument. That default definition should not
3914 exist yet, since the argument is not used uninitialized. */
3915 gcc_assert (ssa_default_def (cfun, arg) == NULL);
3916 narg = make_ssa_name (arg, gimple_build_nop ());
3917 set_ssa_default_def (cfun, arg, narg);
3918 /* ?? Is setting the subcode really necessary ?? */
3919 gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3920 gimple_assign_set_rhs1 (parcopy_stmt, narg);
3921 update_stmt (parcopy_stmt);
3922 }
3923 }
3924
3925 /* Declare local variables needed in CHILD_CFUN. */
3926 block = DECL_INITIAL (child_fn);
3927 BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3928 /* The gimplifier could record temporaries in parallel/task block
3929 rather than in the containing function's local_decls chain,
3930 which would mean cgraph missed finalizing them. Do it now. */
3931 for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3932 if (TREE_CODE (t) == VAR_DECL
3933 && TREE_STATIC (t)
3934 && !DECL_EXTERNAL (t))
3935 varpool_finalize_decl (t);
3936 DECL_SAVED_TREE (child_fn) = NULL;
3937 /* We'll create a CFG for child_fn, so no gimple body is needed. */
3938 gimple_set_body (child_fn, NULL);
3939 TREE_USED (block) = 1;
3940
3941 /* Reset DECL_CONTEXT on function arguments. */
3942 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3943 DECL_CONTEXT (t) = child_fn;
3944
3945 /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3946 so that it can be moved to the child function. */
3947 gsi = gsi_last_bb (entry_bb);
3948 stmt = gsi_stmt (gsi);
3949 gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3950 || gimple_code (stmt) == GIMPLE_OMP_TASK));
3951 gsi_remove (&gsi, true);
3952 e = split_block (entry_bb, stmt);
3953 entry_bb = e->dest;
3954 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3955
3956 /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR. */
3957 if (exit_bb)
3958 {
3959 gsi = gsi_last_bb (exit_bb);
3960 gcc_assert (!gsi_end_p (gsi)
3961 && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3962 stmt = gimple_build_return (NULL);
3963 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3964 gsi_remove (&gsi, true);
3965 }
3966
3967 /* Move the parallel region into CHILD_CFUN. */
3968
3969 if (gimple_in_ssa_p (cfun))
3970 {
3971 init_tree_ssa (child_cfun);
3972 init_ssa_operands (child_cfun);
3973 child_cfun->gimple_df->in_ssa_p = true;
3974 block = NULL_TREE;
3975 }
3976 else
3977 block = gimple_block (entry_stmt);
3978
3979 new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3980 if (exit_bb)
3981 single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3982 /* When the OMP expansion process cannot guarantee an up-to-date
3983 loop tree, arrange for the child function to fix up loops. */
3984 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
3985 child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;
3986
3987 /* Remove non-local VAR_DECLs from child_cfun->local_decls list. */
3988 num = vec_safe_length (child_cfun->local_decls);
3989 for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3990 {
3991 t = (*child_cfun->local_decls)[srcidx];
3992 if (DECL_CONTEXT (t) == cfun->decl)
3993 continue;
3994 if (srcidx != dstidx)
3995 (*child_cfun->local_decls)[dstidx] = t;
3996 dstidx++;
3997 }
3998 if (dstidx != num)
3999 vec_safe_truncate (child_cfun->local_decls, dstidx);
4000
4001 /* Inform the callgraph about the new function. */
4002 DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
4003 cgraph_add_new_function (child_fn, true);
4004
4005 /* Fix the callgraph edges for child_cfun. Those for cfun will be
4006 fixed in a following pass. */
4007 push_cfun (child_cfun);
4008 if (optimize)
4009 optimize_omp_library_calls (entry_stmt);
4010 rebuild_cgraph_edges ();
4011
4012 /* Some EH regions might become dead, see PR34608. If
4013 pass_cleanup_cfg isn't the first pass to happen with the
4014 new child, these dead EH edges might cause problems.
4015 Clean them up now. */
4016 if (flag_exceptions)
4017 {
4018 basic_block bb;
4019 bool changed = false;
4020
4021 FOR_EACH_BB (bb)
4022 changed |= gimple_purge_dead_eh_edges (bb);
4023 if (changed)
4024 cleanup_tree_cfg ();
4025 }
4026 if (gimple_in_ssa_p (cfun))
4027 update_ssa (TODO_update_ssa);
4028 pop_cfun ();
4029 }
4030
4031 /* Emit a library call to launch the children threads. */
4032 if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
4033 expand_parallel_call (region, new_bb, entry_stmt, ws_args);
4034 else
4035 expand_task_call (new_bb, entry_stmt);
4036 if (gimple_in_ssa_p (cfun))
4037 update_ssa (TODO_update_ssa_only_virtuals);
4038 }
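/* Illustrative summary (not part of the logic above): after this
expansion a construct such as

#pragma omp parallel shared(a)
a[omp_get_thread_num ()] = 1;

has its body outlined into the child function created during
lowering (receiving the .omp_data_i block if any data is shared),
while the original call site is replaced by the libgomp launch
emitted by expand_parallel_call, or by expand_task_call for task
constructs. */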
4039
4040
4041 /* Helper function for expand_omp_{for_*,simd}. If this is the outermost
4042 of the combined collapse > 1 loop constructs, generate code like:
4043 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
4044 if (cond3 is <)
4045 adj = STEP3 - 1;
4046 else
4047 adj = STEP3 + 1;
4048 count3 = (adj + N32 - N31) / STEP3;
4049 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
4050 if (cond2 is <)
4051 adj = STEP2 - 1;
4052 else
4053 adj = STEP2 + 1;
4054 count2 = (adj + N22 - N21) / STEP2;
4055 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
4056 if (cond1 is <)
4057 adj = STEP1 - 1;
4058 else
4059 adj = STEP1 + 1;
4060 count1 = (adj + N12 - N11) / STEP1;
4061 count = count1 * count2 * count3;
4062 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
4063 count = 0;
4064 and set ZERO_ITER_BB to that bb. */
4065
4066 /* NOTE: It *could* be better to moosh all of the BBs together,
4067 creating one larger BB with all the computation and the unexpected
4068 jump at the end. I.e.
4069
4070 bool zero3, zero2, zero1, zero;
4071
4072 zero3 = N32 c3 N31;
4073 count3 = (N32 - N31) /[cl] STEP3;
4074 zero2 = N22 c2 N21;
4075 count2 = (N22 - N21) /[cl] STEP2;
4076 zero1 = N12 c1 N11;
4077 count1 = (N12 - N11) /[cl] STEP1;
4078 zero = zero3 || zero2 || zero1;
4079 count = count1 * count2 * count3;
4080 if (__builtin_expect(zero, false)) goto zero_iter_bb;
4081
4082 After all, we expect zero to be false, and thus we expect to have to
4083 evaluate all of the comparison expressions, so short-circuiting
4084 oughtn't be a win. Since the condition isn't protecting a
4085 denominator, we're not concerned about divide-by-zero, so we can
4086 fully evaluate count even if a numerator turned out to be wrong.
4087
4088 It seems like putting this all together would create much better
4089 scheduling opportunities, and less pressure on the chip's branch
4090 predictor. */
4091
4092 static void
4093 expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
4094 basic_block &entry_bb, tree *counts,
4095 basic_block &zero_iter_bb, int &first_zero_iter,
4096 basic_block &l2_dom_bb)
4097 {
4098 tree t, type = TREE_TYPE (fd->loop.v);
4099 gimple stmt;
4100 edge e, ne;
4101 int i;
4102
4103 /* Collapsed loops need work for expansion into SSA form. */
4104 gcc_assert (!gimple_in_ssa_p (cfun));
4105
4106 for (i = 0; i < fd->collapse; i++)
4107 {
4108 tree itype = TREE_TYPE (fd->loops[i].v);
4109
4110 if (SSA_VAR_P (fd->loop.n2)
4111 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
4112 fold_convert (itype, fd->loops[i].n1),
4113 fold_convert (itype, fd->loops[i].n2)))
4114 == NULL_TREE || !integer_onep (t)))
4115 {
4116 tree n1, n2;
4117 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
4118 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
4119 true, GSI_SAME_STMT);
4120 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
4121 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
4122 true, GSI_SAME_STMT);
4123 stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
4124 NULL_TREE, NULL_TREE);
4125 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
4126 if (walk_tree (gimple_cond_lhs_ptr (stmt),
4127 expand_omp_regimplify_p, NULL, NULL)
4128 || walk_tree (gimple_cond_rhs_ptr (stmt),
4129 expand_omp_regimplify_p, NULL, NULL))
4130 {
4131 *gsi = gsi_for_stmt (stmt);
4132 gimple_regimplify_operands (stmt, gsi);
4133 }
4134 e = split_block (entry_bb, stmt);
4135 if (zero_iter_bb == NULL)
4136 {
4137 first_zero_iter = i;
4138 zero_iter_bb = create_empty_bb (entry_bb);
4139 if (current_loops)
4140 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
4141 *gsi = gsi_after_labels (zero_iter_bb);
4142 stmt = gimple_build_assign (fd->loop.n2,
4143 build_zero_cst (type));
4144 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
4145 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
4146 entry_bb);
4147 }
4148 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
4149 ne->probability = REG_BR_PROB_BASE / 2000 - 1;
4150 e->flags = EDGE_TRUE_VALUE;
4151 e->probability = REG_BR_PROB_BASE - ne->probability;
4152 if (l2_dom_bb == NULL)
4153 l2_dom_bb = entry_bb;
4154 entry_bb = e->dest;
4155 *gsi = gsi_last_bb (entry_bb);
4156 }
4157
4158 if (POINTER_TYPE_P (itype))
4159 itype = signed_type_for (itype);
4160 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
4161 ? -1 : 1));
4162 t = fold_build2 (PLUS_EXPR, itype,
4163 fold_convert (itype, fd->loops[i].step), t);
4164 t = fold_build2 (PLUS_EXPR, itype, t,
4165 fold_convert (itype, fd->loops[i].n2));
4166 t = fold_build2 (MINUS_EXPR, itype, t,
4167 fold_convert (itype, fd->loops[i].n1));
4168 /* ?? We could probably use CEIL_DIV_EXPR instead of
4169 TRUNC_DIV_EXPR plus adjusting by hand, unless we can't
4170 generate the same code in the end because generically we
4171 don't know that the values involved must be negative for
4172 GT. ?? */
4173 if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
4174 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4175 fold_build1 (NEGATE_EXPR, itype, t),
4176 fold_build1 (NEGATE_EXPR, itype,
4177 fold_convert (itype,
4178 fd->loops[i].step)));
4179 else
4180 t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
4181 fold_convert (itype, fd->loops[i].step));
4182 t = fold_convert (type, t);
4183 if (TREE_CODE (t) == INTEGER_CST)
4184 counts[i] = t;
4185 else
4186 {
4187 counts[i] = create_tmp_reg (type, ".count");
4188 expand_omp_build_assign (gsi, counts[i], t);
4189 }
4190 if (SSA_VAR_P (fd->loop.n2))
4191 {
4192 if (i == 0)
4193 t = counts[0];
4194 else
4195 t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
4196 expand_omp_build_assign (gsi, fd->loop.n2, t);
4197 }
4198 }
4199 }
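/* Worked example (illustrative only): for

#pragma omp for collapse(2)
for (i = 0; i < 6; i++)
for (j = 0; j < 4; j++)
body (i, j);

both conditions are < and both steps are 1, so adj is 0 for each
loop, giving count1 = 6, count2 = 4 and a collapsed iteration count
of count = 6 * 4 = 24. */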
4200
4201
4202 /* Helper function for expand_omp_{for_*,simd}. Generate code like:
4203 T = V;
4204 V3 = N31 + (T % count3) * STEP3;
4205 T = T / count3;
4206 V2 = N21 + (T % count2) * STEP2;
4207 T = T / count2;
4208 V1 = N11 + T * STEP1;
4209 if this loop doesn't have an inner loop construct combined with it. */
4210
4211 static void
4212 expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
4213 tree *counts, tree startvar)
4214 {
4215 int i;
4216 tree type = TREE_TYPE (fd->loop.v);
4217 tree tem = create_tmp_reg (type, ".tem");
4218 gimple stmt = gimple_build_assign (tem, startvar);
4219 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
4220
4221 for (i = fd->collapse - 1; i >= 0; i--)
4222 {
4223 tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
4224 itype = vtype;
4225 if (POINTER_TYPE_P (vtype))
4226 itype = signed_type_for (vtype);
4227 if (i != 0)
4228 t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
4229 else
4230 t = tem;
4231 t = fold_convert (itype, t);
4232 t = fold_build2 (MULT_EXPR, itype, t,
4233 fold_convert (itype, fd->loops[i].step));
4234 if (POINTER_TYPE_P (vtype))
4235 t = fold_build_pointer_plus (fd->loops[i].n1, t);
4236 else
4237 t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
4238 t = force_gimple_operand_gsi (gsi, t,
4239 DECL_P (fd->loops[i].v)
4240 && TREE_ADDRESSABLE (fd->loops[i].v),
4241 NULL_TREE, false,
4242 GSI_CONTINUE_LINKING);
4243 stmt = gimple_build_assign (fd->loops[i].v, t);
4244 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
4245 if (i != 0)
4246 {
4247 t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
4248 t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
4249 false, GSI_CONTINUE_LINKING);
4250 stmt = gimple_build_assign (tem, t);
4251 gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
4252 }
4253 }
4254 }
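/* Continuing the collapse(2) example with count2 = 4: when the runtime
hands out logical iteration T = 13, the code above computes
V2 = N21 + (13 % 4) * STEP2 and V1 = N11 + (13 / 4) * STEP1, i.e.
the pair (i, j) = (3, 1) for unit steps and zero lower bounds.
(Illustrative only.) */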
4255
4256
4257 /* Helper function for expand_omp_for_*. Generate code like:
4258 L10:
4259 V3 += STEP3;
4260 if (V3 cond3 N32) goto BODY_BB; else goto L11;
4261 L11:
4262 V3 = N31;
4263 V2 += STEP2;
4264 if (V2 cond2 N22) goto BODY_BB; else goto L12;
4265 L12:
4266 V2 = N21;
4267 V1 += STEP1;
4268 goto BODY_BB; */
4269
4270 static basic_block
4271 extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
4272 basic_block body_bb)
4273 {
4274 basic_block last_bb, bb, collapse_bb = NULL;
4275 int i;
4276 gimple_stmt_iterator gsi;
4277 edge e;
4278 tree t;
4279 gimple stmt;
4280
4281 last_bb = cont_bb;
4282 for (i = fd->collapse - 1; i >= 0; i--)
4283 {
4284 tree vtype = TREE_TYPE (fd->loops[i].v);
4285
4286 bb = create_empty_bb (last_bb);
4287 if (current_loops)
4288 add_bb_to_loop (bb, last_bb->loop_father);
4289 gsi = gsi_start_bb (bb);
4290
4291 if (i < fd->collapse - 1)
4292 {
4293 e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
4294 e->probability = REG_BR_PROB_BASE / 8;
4295
4296 t = fd->loops[i + 1].n1;
4297 t = force_gimple_operand_gsi (&gsi, t,
4298 DECL_P (fd->loops[i + 1].v)
4299 && TREE_ADDRESSABLE (fd->loops[i
4300 + 1].v),
4301 NULL_TREE, false,
4302 GSI_CONTINUE_LINKING);
4303 stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4304 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4305 }
4306 else
4307 collapse_bb = bb;
4308
4309 set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4310
4311 if (POINTER_TYPE_P (vtype))
4312 t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
4313 else
4314 t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
4315 t = force_gimple_operand_gsi (&gsi, t,
4316 DECL_P (fd->loops[i].v)
4317 && TREE_ADDRESSABLE (fd->loops[i].v),
4318 NULL_TREE, false, GSI_CONTINUE_LINKING);
4319 stmt = gimple_build_assign (fd->loops[i].v, t);
4320 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4321
4322 if (i > 0)
4323 {
4324 t = fd->loops[i].n2;
4325 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4326 false, GSI_CONTINUE_LINKING);
4327 tree v = fd->loops[i].v;
4328 if (DECL_P (v) && TREE_ADDRESSABLE (v))
4329 v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
4330 false, GSI_CONTINUE_LINKING);
4331 t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
4332 stmt = gimple_build_cond_empty (t);
4333 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4334 e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
4335 e->probability = REG_BR_PROB_BASE * 7 / 8;
4336 }
4337 else
4338 make_edge (bb, body_bb, EDGE_FALLTHRU);
4339 last_bb = bb;
4340 }
4341
4342 return collapse_bb;
4343 }
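/* In effect the blocks built above advance the collapsed iteration
variables like an odometer: the innermost variable is stepped first,
and only when it fails its condition is it reset to its lower bound
and the next outer variable stepped, as shown by the L1x labels in
the comment before this function. (Descriptive note only.) */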
4344
4345
4346 /* A subroutine of expand_omp_for. Generate code for a parallel
4347 loop with any schedule. Given parameters:
4348
4349 for (V = N1; V cond N2; V += STEP) BODY;
4350
4351 where COND is "<" or ">", we generate pseudocode
4352
4353 more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
4354 if (more) goto L0; else goto L3;
4355 L0:
4356 V = istart0;
4357 iend = iend0;
4358 L1:
4359 BODY;
4360 V += STEP;
4361 if (V cond iend) goto L1; else goto L2;
4362 L2:
4363 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
4364 L3:
4365
4366 If this is a combined omp parallel loop, instead of the call to
4367 GOMP_loop_foo_start, we call GOMP_loop_foo_next.
4368
4369 For collapsed loops, given parameters:
4370 collapse(3)
4371 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
4372 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
4373 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
4374 BODY;
4375
4376 we generate pseudocode
4377
4378 if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
4379 if (cond3 is <)
4380 adj = STEP3 - 1;
4381 else
4382 adj = STEP3 + 1;
4383 count3 = (adj + N32 - N31) / STEP3;
4384 if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
4385 if (cond2 is <)
4386 adj = STEP2 - 1;
4387 else
4388 adj = STEP2 + 1;
4389 count2 = (adj + N22 - N21) / STEP2;
4390 if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
4391 if (cond1 is <)
4392 adj = STEP1 - 1;
4393 else
4394 adj = STEP1 + 1;
4395 count1 = (adj + N12 - N11) / STEP1;
4396 count = count1 * count2 * count3;
4397 goto Z1;
4398 Z0:
4399 count = 0;
4400 Z1:
4401 more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
4402 if (more) goto L0; else goto L3;
4403 L0:
4404 V = istart0;
4405 T = V;
4406 V3 = N31 + (T % count3) * STEP3;
4407 T = T / count3;
4408 V2 = N21 + (T % count2) * STEP2;
4409 T = T / count2;
4410 V1 = N11 + T * STEP1;
4411 iend = iend0;
4412 L1:
4413 BODY;
4414 V += 1;
4415 if (V < iend) goto L10; else goto L2;
4416 L10:
4417 V3 += STEP3;
4418 if (V3 cond3 N32) goto L1; else goto L11;
4419 L11:
4420 V3 = N31;
4421 V2 += STEP2;
4422 if (V2 cond2 N22) goto L1; else goto L12;
4423 L12:
4424 V2 = N21;
4425 V1 += STEP1;
4426 goto L1;
4427 L2:
4428 if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
4429 L3:
4430
4431 */
4432
4433 static void
4434 expand_omp_for_generic (struct omp_region *region,
4435 struct omp_for_data *fd,
4436 enum built_in_function start_fn,
4437 enum built_in_function next_fn)
4438 {
4439 tree type, istart0, iend0, iend;
4440 tree t, vmain, vback, bias = NULL_TREE;
4441 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
4442 basic_block l2_bb = NULL, l3_bb = NULL;
4443 gimple_stmt_iterator gsi;
4444 gimple stmt;
4445 bool in_combined_parallel = is_combined_parallel (region);
4446 bool broken_loop = region->cont == NULL;
4447 edge e, ne;
4448 tree *counts = NULL;
4449 int i;
4450
4451 gcc_assert (!broken_loop || !in_combined_parallel);
4452 gcc_assert (fd->iter_type == long_integer_type_node
4453 || !in_combined_parallel);
4454
4455 type = TREE_TYPE (fd->loop.v);
4456 istart0 = create_tmp_var (fd->iter_type, ".istart0");
4457 iend0 = create_tmp_var (fd->iter_type, ".iend0");
4458 TREE_ADDRESSABLE (istart0) = 1;
4459 TREE_ADDRESSABLE (iend0) = 1;
4460
4461 /* See if we need to bias by LLONG_MIN. */
4462 if (fd->iter_type == long_long_unsigned_type_node
4463 && TREE_CODE (type) == INTEGER_TYPE
4464 && !TYPE_UNSIGNED (type))
4465 {
4466 tree n1, n2;
4467
4468 if (fd->loop.cond_code == LT_EXPR)
4469 {
4470 n1 = fd->loop.n1;
4471 n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
4472 }
4473 else
4474 {
4475 n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
4476 n2 = fd->loop.n1;
4477 }
4478 if (TREE_CODE (n1) != INTEGER_CST
4479 || TREE_CODE (n2) != INTEGER_CST
4480 || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
4481 bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
4482 }
4483
4484 entry_bb = region->entry;
4485 cont_bb = region->cont;
4486 collapse_bb = NULL;
4487 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4488 gcc_assert (broken_loop
4489 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4490 l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4491 l1_bb = single_succ (l0_bb);
4492 if (!broken_loop)
4493 {
4494 l2_bb = create_empty_bb (cont_bb);
4495 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
4496 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4497 }
4498 else
4499 l2_bb = NULL;
4500 l3_bb = BRANCH_EDGE (entry_bb)->dest;
4501 exit_bb = region->exit;
4502
4503 gsi = gsi_last_bb (entry_bb);
4504
4505 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4506 if (fd->collapse > 1)
4507 {
4508 int first_zero_iter = -1;
4509 basic_block zero_iter_bb = NULL, l2_dom_bb = NULL;
4510
4511 counts = XALLOCAVEC (tree, fd->collapse);
4512 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
4513 zero_iter_bb, first_zero_iter,
4514 l2_dom_bb);
4515
4516 if (zero_iter_bb)
4517 {
4518 /* Some counts[i] vars might be uninitialized if
4519 some loop has zero iterations. But the body shouldn't
4520 be executed in that case, so just avoid uninit warnings. */
4521 for (i = first_zero_iter; i < fd->collapse; i++)
4522 if (SSA_VAR_P (counts[i]))
4523 TREE_NO_WARNING (counts[i]) = 1;
4524 gsi_prev (&gsi);
4525 e = split_block (entry_bb, gsi_stmt (gsi));
4526 entry_bb = e->dest;
4527 make_edge (zero_iter_bb, entry_bb, EDGE_FALLTHRU);
4528 gsi = gsi_last_bb (entry_bb);
4529 set_immediate_dominator (CDI_DOMINATORS, entry_bb,
4530 get_immediate_dominator (CDI_DOMINATORS,
4531 zero_iter_bb));
4532 }
4533 }
4534 if (in_combined_parallel)
4535 {
4536 /* In a combined parallel loop, emit a call to
4537 GOMP_loop_foo_next. */
4538 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4539 build_fold_addr_expr (istart0),
4540 build_fold_addr_expr (iend0));
4541 }
4542 else
4543 {
4544 tree t0, t1, t2, t3, t4;
4545 /* If this is not a combined parallel loop, emit a call to
4546 GOMP_loop_foo_start in ENTRY_BB. */
4547 t4 = build_fold_addr_expr (iend0);
4548 t3 = build_fold_addr_expr (istart0);
4549 t2 = fold_convert (fd->iter_type, fd->loop.step);
4550 t1 = fd->loop.n2;
4551 t0 = fd->loop.n1;
4552 if (POINTER_TYPE_P (TREE_TYPE (t0))
4553 && TYPE_PRECISION (TREE_TYPE (t0))
4554 != TYPE_PRECISION (fd->iter_type))
4555 {
4556 /* Avoid casting pointers to an integer of a different size. */
4557 tree itype = signed_type_for (type);
4558 t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
4559 t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
4560 }
4561 else
4562 {
4563 t1 = fold_convert (fd->iter_type, t1);
4564 t0 = fold_convert (fd->iter_type, t0);
4565 }
4566 if (bias)
4567 {
4568 t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
4569 t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
4570 }
4571 if (fd->iter_type == long_integer_type_node)
4572 {
4573 if (fd->chunk_size)
4574 {
4575 t = fold_convert (fd->iter_type, fd->chunk_size);
4576 t = build_call_expr (builtin_decl_explicit (start_fn),
4577 6, t0, t1, t2, t, t3, t4);
4578 }
4579 else
4580 t = build_call_expr (builtin_decl_explicit (start_fn),
4581 5, t0, t1, t2, t3, t4);
4582 }
4583 else
4584 {
4585 tree t5;
4586 tree c_bool_type;
4587 tree bfn_decl;
4588
4589 /* The GOMP_loop_ull_*start functions have an additional boolean
4590 argument, true for < loops and false for > loops.
4591 In Fortran, the C bool type can be different from
4592 boolean_type_node. */
4593 bfn_decl = builtin_decl_explicit (start_fn);
4594 c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
4595 t5 = build_int_cst (c_bool_type,
4596 fd->loop.cond_code == LT_EXPR ? 1 : 0);
4597 if (fd->chunk_size)
4598 {
4599 tree bfn_decl = builtin_decl_explicit (start_fn);
4600 t = fold_convert (fd->iter_type, fd->chunk_size);
4601 t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
4602 }
4603 else
4604 t = build_call_expr (builtin_decl_explicit (start_fn),
4605 6, t5, t0, t1, t2, t3, t4);
4606 }
4607 }
4608 if (TREE_TYPE (t) != boolean_type_node)
4609 t = fold_build2 (NE_EXPR, boolean_type_node,
4610 t, build_int_cst (TREE_TYPE (t), 0));
4611 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4612 true, GSI_SAME_STMT);
4613 gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4614
4615 /* Remove the GIMPLE_OMP_FOR statement. */
4616 gsi_remove (&gsi, true);
4617
4618 /* Iteration setup for sequential loop goes in L0_BB. */
4619 tree startvar = fd->loop.v;
4620 tree endvar = NULL_TREE;
4621
4622 gsi = gsi_start_bb (l0_bb);
4623 t = istart0;
4624 if (bias)
4625 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
4626 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
4627 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
4628 t = fold_convert (TREE_TYPE (startvar), t);
4629 t = force_gimple_operand_gsi (&gsi, t,
4630 DECL_P (startvar)
4631 && TREE_ADDRESSABLE (startvar),
4632 NULL_TREE, false, GSI_CONTINUE_LINKING);
4633 stmt = gimple_build_assign (startvar, t);
4634 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4635
4636 t = iend0;
4637 if (bias)
4638 t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
4639 if (POINTER_TYPE_P (TREE_TYPE (startvar)))
4640 t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
4641 t = fold_convert (TREE_TYPE (startvar), t);
4642 iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4643 false, GSI_CONTINUE_LINKING);
4644 if (endvar)
4645 {
4646 stmt = gimple_build_assign (endvar, iend);
4647 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4648 }
4649 if (fd->collapse > 1)
4650 expand_omp_for_init_vars (fd, &gsi, counts, startvar);
4651
4652 if (!broken_loop)
4653 {
4654 /* Code to control the increment and predicate for the sequential
4655 loop goes in the CONT_BB. */
4656 gsi = gsi_last_bb (cont_bb);
4657 stmt = gsi_stmt (gsi);
4658 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4659 vmain = gimple_omp_continue_control_use (stmt);
4660 vback = gimple_omp_continue_control_def (stmt);
4661
4662 /* OMP4 placeholder: if (!gimple_omp_for_combined_p (fd->for_stmt)). */
4663 if (1)
4664 {
4665 if (POINTER_TYPE_P (type))
4666 t = fold_build_pointer_plus (vmain, fd->loop.step);
4667 else
4668 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4669 t = force_gimple_operand_gsi (&gsi, t,
4670 DECL_P (vback)
4671 && TREE_ADDRESSABLE (vback),
4672 NULL_TREE, true, GSI_SAME_STMT);
4673 stmt = gimple_build_assign (vback, t);
4674 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4675
4676 t = build2 (fd->loop.cond_code, boolean_type_node,
4677 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
4678 iend);
4679 stmt = gimple_build_cond_empty (t);
4680 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4681 }
4682
4683 /* Remove GIMPLE_OMP_CONTINUE. */
4684 gsi_remove (&gsi, true);
4685
4686 if (fd->collapse > 1)
4687 collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);
4688
4689 /* Emit code to get the next parallel iteration in L2_BB. */
4690 gsi = gsi_start_bb (l2_bb);
4691
4692 t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4693 build_fold_addr_expr (istart0),
4694 build_fold_addr_expr (iend0));
4695 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4696 false, GSI_CONTINUE_LINKING);
4697 if (TREE_TYPE (t) != boolean_type_node)
4698 t = fold_build2 (NE_EXPR, boolean_type_node,
4699 t, build_int_cst (TREE_TYPE (t), 0));
4700 stmt = gimple_build_cond_empty (t);
4701 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4702 }
4703
4704 /* Add the loop cleanup function. */
4705 gsi = gsi_last_bb (exit_bb);
4706 if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4707 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
4708 else
4709 t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
4710 stmt = gimple_build_call (t, 0);
4711 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4712 gsi_remove (&gsi, true);
4713
4714 /* Connect the new blocks. */
4715 find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4716 find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4717
4718 if (!broken_loop)
4719 {
4720 gimple_seq phis;
4721
4722 e = find_edge (cont_bb, l3_bb);
4723 ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4724
4725 phis = phi_nodes (l3_bb);
4726 for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4727 {
4728 gimple phi = gsi_stmt (gsi);
4729 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4730 PHI_ARG_DEF_FROM_EDGE (phi, e));
4731 }
4732 remove_edge (e);
4733
4734 make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4735 if (current_loops)
4736 add_bb_to_loop (l2_bb, cont_bb->loop_father);
4737 e = find_edge (cont_bb, l1_bb);
4738 /* OMP4 placeholder for gimple_omp_for_combined_p (fd->for_stmt). */
4739 if (0)
4740 ;
4741 else if (fd->collapse > 1)
4742 {
4743 remove_edge (e);
4744 e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4745 }
4746 else
4747 e->flags = EDGE_TRUE_VALUE;
4748 if (e)
4749 {
4750 e->probability = REG_BR_PROB_BASE * 7 / 8;
4751 find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4752 }
4753 else
4754 {
4755 e = find_edge (cont_bb, l2_bb);
4756 e->flags = EDGE_FALLTHRU;
4757 }
4758 make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4759
4760 set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4761 recompute_dominator (CDI_DOMINATORS, l2_bb));
4762 set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4763 recompute_dominator (CDI_DOMINATORS, l3_bb));
4764 set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4765 recompute_dominator (CDI_DOMINATORS, l0_bb));
4766 set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4767 recompute_dominator (CDI_DOMINATORS, l1_bb));
4768
4769 struct loop *outer_loop = alloc_loop ();
4770 outer_loop->header = l0_bb;
4771 outer_loop->latch = l2_bb;
4772 add_loop (outer_loop, l0_bb->loop_father);
4773
4774 /* OMP4 placeholder: if (!gimple_omp_for_combined_p (fd->for_stmt)). */
4775 if (1)
4776 {
4777 struct loop *loop = alloc_loop ();
4778 loop->header = l1_bb;
4779 /* The loop may have multiple latches. */
4780 add_loop (loop, outer_loop);
4781 }
4782 }
4783 }
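/* Illustrative note (not part of the pass): this generic scheme is used
whenever chunks have to be obtained from the runtime, e.g.

#pragma omp for schedule(dynamic, 16)
for (i = 0; i < n; i++)
work (i);

for which START_FN and NEXT_FN resolve to GOMP_loop_dynamic_start
and GOMP_loop_dynamic_next; guided, runtime and ordered schedules
use the analogous libgomp entry points. */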
4784
4785
4786 /* A subroutine of expand_omp_for. Generate code for a parallel
4787 loop with static schedule and no specified chunk size. Given
4788 parameters:
4789
4790 for (V = N1; V cond N2; V += STEP) BODY;
4791
4792 where COND is "<" or ">", we generate pseudocode
4793
4794 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
4795 if (cond is <)
4796 adj = STEP - 1;
4797 else
4798 adj = STEP + 1;
4799 if ((__typeof (V)) -1 > 0 && cond is >)
4800 n = -(adj + N2 - N1) / -STEP;
4801 else
4802 n = (adj + N2 - N1) / STEP;
4803 q = n / nthreads;
4804 tt = n % nthreads;
4805 if (threadid < tt) goto L3; else goto L4;
4806 L3:
4807 tt = 0;
4808 q = q + 1;
4809 L4:
4810 s0 = q * threadid + tt;
4811 e0 = s0 + q;
4812 V = s0 * STEP + N1;
4813 if (s0 >= e0) goto L2; else goto L0;
4814 L0:
4815 e = e0 * STEP + N1;
4816 L1:
4817 BODY;
4818 V += STEP;
4819 if (V cond e) goto L1;
4820 L2:
4821 */
4822
4823 static void
4824 expand_omp_for_static_nochunk (struct omp_region *region,
4825 struct omp_for_data *fd)
4826 {
4827 tree n, q, s0, e0, e, t, tt, nthreads, threadid;
4828 tree type, itype, vmain, vback;
4829 basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4830 basic_block body_bb, cont_bb;
4831 basic_block fin_bb;
4832 gimple_stmt_iterator gsi;
4833 gimple stmt;
4834 edge ep;
4835
4836 itype = type = TREE_TYPE (fd->loop.v);
4837 if (POINTER_TYPE_P (type))
4838 itype = signed_type_for (type);
4839
4840 entry_bb = region->entry;
4841 cont_bb = region->cont;
4842 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4843 gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4844 seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4845 body_bb = single_succ (seq_start_bb);
4846 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4847 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4848 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4849 exit_bb = region->exit;
4850
4851 /* Iteration space partitioning goes in ENTRY_BB. */
4852 gsi = gsi_last_bb (entry_bb);
4853 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4854
4855 t = fold_binary (fd->loop.cond_code, boolean_type_node,
4856 fold_convert (type, fd->loop.n1),
4857 fold_convert (type, fd->loop.n2));
4858 if (TYPE_UNSIGNED (type)
4859 && (t == NULL_TREE || !integer_onep (t)))
4860 {
4861 tree n1, n2;
4862 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
4863 n1 = force_gimple_operand_gsi (&gsi, n1, true, NULL_TREE,
4864 true, GSI_SAME_STMT);
4865 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
4866 n2 = force_gimple_operand_gsi (&gsi, n2, true, NULL_TREE,
4867 true, GSI_SAME_STMT);
4868 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
4869 NULL_TREE, NULL_TREE);
4870 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4871 if (walk_tree (gimple_cond_lhs_ptr (stmt),
4872 expand_omp_regimplify_p, NULL, NULL)
4873 || walk_tree (gimple_cond_rhs_ptr (stmt),
4874 expand_omp_regimplify_p, NULL, NULL))
4875 {
4876 gsi = gsi_for_stmt (stmt);
4877 gimple_regimplify_operands (stmt, &gsi);
4878 }
4879 ep = split_block (entry_bb, stmt);
4880 ep->flags = EDGE_TRUE_VALUE;
4881 entry_bb = ep->dest;
4882 ep->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
4883 ep = make_edge (ep->src, fin_bb, EDGE_FALSE_VALUE);
4884 ep->probability = REG_BR_PROB_BASE / 2000 - 1;
4885 if (gimple_in_ssa_p (cfun))
4886 {
4887 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
4888 for (gsi = gsi_start_phis (fin_bb);
4889 !gsi_end_p (gsi); gsi_next (&gsi))
4890 {
4891 gimple phi = gsi_stmt (gsi);
4892 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
4893 ep, UNKNOWN_LOCATION);
4894 }
4895 }
4896 gsi = gsi_last_bb (entry_bb);
4897 }
4898
4899 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4900 t = fold_convert (itype, t);
4901 nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4902 true, GSI_SAME_STMT);
4903
4904 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4905 t = fold_convert (itype, t);
4906 threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4907 true, GSI_SAME_STMT);
4908
4909 fd->loop.n1
4910 = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4911 true, NULL_TREE, true, GSI_SAME_STMT);
4912 fd->loop.n2
4913 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4914 true, NULL_TREE, true, GSI_SAME_STMT);
4915 fd->loop.step
4916 = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4917 true, NULL_TREE, true, GSI_SAME_STMT);
4918
4919 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4920 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4921 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4922 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4923 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4924 t = fold_build2 (TRUNC_DIV_EXPR, itype,
4925 fold_build1 (NEGATE_EXPR, itype, t),
4926 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4927 else
4928 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4929 t = fold_convert (itype, t);
4930 n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4931
4932 q = create_tmp_reg (itype, "q");
4933 t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4934 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4935 gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4936
4937 tt = create_tmp_reg (itype, "tt");
4938 t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4939 t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4940 gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
4941
4942 t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4943 stmt = gimple_build_cond_empty (t);
4944 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4945
4946 second_bb = split_block (entry_bb, stmt)->dest;
4947 gsi = gsi_last_bb (second_bb);
4948 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4949
4950 gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4951 GSI_SAME_STMT);
4952 stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4953 build_int_cst (itype, 1));
4954 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4955
4956 third_bb = split_block (second_bb, stmt)->dest;
4957 gsi = gsi_last_bb (third_bb);
4958 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4959
4960 t = build2 (MULT_EXPR, itype, q, threadid);
4961 t = build2 (PLUS_EXPR, itype, t, tt);
4962 s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4963
4964 t = fold_build2 (PLUS_EXPR, itype, s0, q);
4965 e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4966
4967 t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4968 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4969
4970 /* Remove the GIMPLE_OMP_FOR statement. */
4971 gsi_remove (&gsi, true);
4972
4973 /* Setup code for sequential iteration goes in SEQ_START_BB. */
4974 gsi = gsi_start_bb (seq_start_bb);
4975
4976 t = fold_convert (itype, s0);
4977 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4978 if (POINTER_TYPE_P (type))
4979 t = fold_build_pointer_plus (fd->loop.n1, t);
4980 else
4981 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4982 t = force_gimple_operand_gsi (&gsi, t,
4983 DECL_P (fd->loop.v)
4984 && TREE_ADDRESSABLE (fd->loop.v),
4985 NULL_TREE, false, GSI_CONTINUE_LINKING);
4986 stmt = gimple_build_assign (fd->loop.v, t);
4987 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4988
4989 t = fold_convert (itype, e0);
4990 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4991 if (POINTER_TYPE_P (type))
4992 t = fold_build_pointer_plus (fd->loop.n1, t);
4993 else
4994 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4995 e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4996 false, GSI_CONTINUE_LINKING);
4997
4998 /* The code controlling the sequential loop replaces the
4999 GIMPLE_OMP_CONTINUE. */
5000 gsi = gsi_last_bb (cont_bb);
5001 stmt = gsi_stmt (gsi);
5002 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5003 vmain = gimple_omp_continue_control_use (stmt);
5004 vback = gimple_omp_continue_control_def (stmt);
5005
5006 if (POINTER_TYPE_P (type))
5007 t = fold_build_pointer_plus (vmain, fd->loop.step);
5008 else
5009 t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
5010 t = force_gimple_operand_gsi (&gsi, t,
5011 DECL_P (vback) && TREE_ADDRESSABLE (vback),
5012 NULL_TREE, true, GSI_SAME_STMT);
5013 stmt = gimple_build_assign (vback, t);
5014 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
5015
5016 t = build2 (fd->loop.cond_code, boolean_type_node,
5017 DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback, e);
5018 gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
5019
5020 /* Remove the GIMPLE_OMP_CONTINUE statement. */
5021 gsi_remove (&gsi, true);
5022
5023 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
5024 gsi = gsi_last_bb (exit_bb);
5025 if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
5026 force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
5027 false, GSI_SAME_STMT);
5028 gsi_remove (&gsi, true);
5029
5030 /* Connect all the blocks. */
5031 ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
5032 ep->probability = REG_BR_PROB_BASE / 4 * 3;
5033 ep = find_edge (entry_bb, second_bb);
5034 ep->flags = EDGE_TRUE_VALUE;
5035 ep->probability = REG_BR_PROB_BASE / 4;
5036 find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
5037 find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
5038
5039 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
5040 find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
5041
5042 set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
5043 set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
5044 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
5045 set_immediate_dominator (CDI_DOMINATORS, body_bb,
5046 recompute_dominator (CDI_DOMINATORS, body_bb));
5047 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
5048 recompute_dominator (CDI_DOMINATORS, fin_bb));
5049
5050 struct loop *loop = alloc_loop ();
5051 loop->header = body_bb;
5052 loop->latch = cont_bb;
5053 add_loop (loop, body_bb->loop_father);
5054 }
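/* Worked example (illustrative only): with n = 10 iterations and
nthreads = 4 the code above computes q = 2 and tt = 2, so threads 0
and 1 each take q + 1 = 3 iterations ([0,3) and [3,6)) while threads
2 and 3 take 2 each ([6,8) and [8,10)). Every thread receives one
contiguous block and no libgomp calls are needed inside the loop. */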
5055
5056
5057 /* A subroutine of expand_omp_for. Generate code for a parallel
5058 loop with static schedule and a specified chunk size. Given
5059 parameters:
5060
5061 for (V = N1; V cond N2; V += STEP) BODY;
5062
5063 where COND is "<" or ">", we generate pseudocode
5064
5065 if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
5066 if (cond is <)
5067 adj = STEP - 1;
5068 else
5069 adj = STEP + 1;
5070 if ((__typeof (V)) -1 > 0 && cond is >)
5071 n = -(adj + N2 - N1) / -STEP;
5072 else
5073 n = (adj + N2 - N1) / STEP;
5074 trip = 0;
5075 V = threadid * CHUNK * STEP + N1; -- this extra definition of V is
5076 here so that V is defined
5077 if the loop is not entered
5078 L0:
5079 s0 = (trip * nthreads + threadid) * CHUNK;
5080 e0 = min(s0 + CHUNK, n);
5081 if (s0 < n) goto L1; else goto L4;
5082 L1:
5083 V = s0 * STEP + N1;
5084 e = e0 * STEP + N1;
5085 L2:
5086 BODY;
5087 V += STEP;
5088 if (V cond e) goto L2; else goto L3;
5089 L3:
5090 trip += 1;
5091 goto L0;
5092 L4:
5093 */
5094
5095 static void
5096 expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
5097 {
5098 tree n, s0, e0, e, t;
5099 tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
5100 tree type, itype, v_main, v_back, v_extra;
5101 basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
5102 basic_block trip_update_bb, cont_bb, fin_bb;
5103 gimple_stmt_iterator si;
5104 gimple stmt;
5105 edge se;
5106
5107 itype = type = TREE_TYPE (fd->loop.v);
5108 if (POINTER_TYPE_P (type))
5109 itype = signed_type_for (type);
5110
5111 entry_bb = region->entry;
5112 se = split_block (entry_bb, last_stmt (entry_bb));
5113 entry_bb = se->src;
5114 iter_part_bb = se->dest;
5115 cont_bb = region->cont;
5116 gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
5117 gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
5118 == FALLTHRU_EDGE (cont_bb)->dest);
5119 seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
5120 body_bb = single_succ (seq_start_bb);
5121 gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
5122 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5123 fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
5124 trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
5125 exit_bb = region->exit;
5126
5127 /* Trip and adjustment setup goes in ENTRY_BB. */
5128 si = gsi_last_bb (entry_bb);
5129 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
5130
5131 t = fold_binary (fd->loop.cond_code, boolean_type_node,
5132 fold_convert (type, fd->loop.n1),
5133 fold_convert (type, fd->loop.n2));
5134 if (TYPE_UNSIGNED (type)
5135 && (t == NULL_TREE || !integer_onep (t)))
5136 {
5137 tree n1, n2;
5138 n1 = fold_convert (type, unshare_expr (fd->loop.n1));
5139 n1 = force_gimple_operand_gsi (&si, n1, true, NULL_TREE,
5140 true, GSI_SAME_STMT);
5141 n2 = fold_convert (type, unshare_expr (fd->loop.n2));
5142 n2 = force_gimple_operand_gsi (&si, n2, true, NULL_TREE,
5143 true, GSI_SAME_STMT);
5144 stmt = gimple_build_cond (fd->loop.cond_code, n1, n2,
5145 NULL_TREE, NULL_TREE);
5146 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5147 if (walk_tree (gimple_cond_lhs_ptr (stmt),
5148 expand_omp_regimplify_p, NULL, NULL)
5149 || walk_tree (gimple_cond_rhs_ptr (stmt),
5150 expand_omp_regimplify_p, NULL, NULL))
5151 {
5152 si = gsi_for_stmt (stmt);
5153 gimple_regimplify_operands (stmt, &si);
5154 }
5155 se = split_block (entry_bb, stmt);
5156 se->flags = EDGE_TRUE_VALUE;
5157 entry_bb = se->dest;
5158 se->probability = REG_BR_PROB_BASE - (REG_BR_PROB_BASE / 2000 - 1);
5159 se = make_edge (se->src, fin_bb, EDGE_FALSE_VALUE);
5160 se->probability = REG_BR_PROB_BASE / 2000 - 1;
5161 if (gimple_in_ssa_p (cfun))
5162 {
5163 int dest_idx = find_edge (entry_bb, fin_bb)->dest_idx;
5164 for (si = gsi_start_phis (fin_bb);
5165 !gsi_end_p (si); gsi_next (&si))
5166 {
5167 gimple phi = gsi_stmt (si);
5168 add_phi_arg (phi, gimple_phi_arg_def (phi, dest_idx),
5169 se, UNKNOWN_LOCATION);
5170 }
5171 }
5172 si = gsi_last_bb (entry_bb);
5173 }
5174
5175 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
5176 t = fold_convert (itype, t);
5177 nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5178 true, GSI_SAME_STMT);
5179
5180 t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
5181 t = fold_convert (itype, t);
5182 threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5183 true, GSI_SAME_STMT);
5184
5185 fd->loop.n1
5186 = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
5187 true, NULL_TREE, true, GSI_SAME_STMT);
5188 fd->loop.n2
5189 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
5190 true, NULL_TREE, true, GSI_SAME_STMT);
5191 fd->loop.step
5192 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
5193 true, NULL_TREE, true, GSI_SAME_STMT);
5194 fd->chunk_size
5195 = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
5196 true, NULL_TREE, true, GSI_SAME_STMT);
5197
5198 t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
5199 t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
5200 t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
5201 t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
5202 if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
5203 t = fold_build2 (TRUNC_DIV_EXPR, itype,
5204 fold_build1 (NEGATE_EXPR, itype, t),
5205 fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
5206 else
5207 t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
5208 t = fold_convert (itype, t);
5209 n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5210 true, GSI_SAME_STMT);
5211
5212 trip_var = create_tmp_reg (itype, ".trip");
5213 if (gimple_in_ssa_p (cfun))
5214 {
5215 trip_init = make_ssa_name (trip_var, NULL);
5216 trip_main = make_ssa_name (trip_var, NULL);
5217 trip_back = make_ssa_name (trip_var, NULL);
5218 }
5219 else
5220 {
5221 trip_init = trip_var;
5222 trip_main = trip_var;
5223 trip_back = trip_var;
5224 }
5225
5226 stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
5227 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5228
5229 t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
5230 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
5231 if (POINTER_TYPE_P (type))
5232 t = fold_build_pointer_plus (fd->loop.n1, t);
5233 else
5234 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
5235 v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5236 true, GSI_SAME_STMT);
5237
5238 /* Remove the GIMPLE_OMP_FOR. */
5239 gsi_remove (&si, true);
5240
5241 /* Iteration space partitioning goes in ITER_PART_BB. */
5242 si = gsi_last_bb (iter_part_bb);
5243
5244 t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
5245 t = fold_build2 (PLUS_EXPR, itype, t, threadid);
5246 t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
5247 s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5248 false, GSI_CONTINUE_LINKING);
5249
5250 t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
5251 t = fold_build2 (MIN_EXPR, itype, t, n);
5252 e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5253 false, GSI_CONTINUE_LINKING);
5254
5255 t = build2 (LT_EXPR, boolean_type_node, s0, n);
5256 gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
5257
5258 /* Setup code for sequential iteration goes in SEQ_START_BB. */
5259 si = gsi_start_bb (seq_start_bb);
5260
5261 t = fold_convert (itype, s0);
5262 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
5263 if (POINTER_TYPE_P (type))
5264 t = fold_build_pointer_plus (fd->loop.n1, t);
5265 else
5266 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
5267 t = force_gimple_operand_gsi (&si, t,
5268 DECL_P (fd->loop.v)
5269 && TREE_ADDRESSABLE (fd->loop.v),
5270 NULL_TREE, false, GSI_CONTINUE_LINKING);
5271 stmt = gimple_build_assign (fd->loop.v, t);
5272 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
5273
5274 t = fold_convert (itype, e0);
5275 t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
5276 if (POINTER_TYPE_P (type))
5277 t = fold_build_pointer_plus (fd->loop.n1, t);
5278 else
5279 t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
5280 e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5281 false, GSI_CONTINUE_LINKING);
5282
5283 /* The code controlling the sequential loop goes in CONT_BB,
5284 replacing the GIMPLE_OMP_CONTINUE. */
5285 si = gsi_last_bb (cont_bb);
5286 stmt = gsi_stmt (si);
5287 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5288 v_main = gimple_omp_continue_control_use (stmt);
5289 v_back = gimple_omp_continue_control_def (stmt);
5290
5291 if (POINTER_TYPE_P (type))
5292 t = fold_build_pointer_plus (v_main, fd->loop.step);
5293 else
5294 t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
5295 if (DECL_P (v_back) && TREE_ADDRESSABLE (v_back))
5296 t = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
5297 true, GSI_SAME_STMT);
5298 stmt = gimple_build_assign (v_back, t);
5299 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5300
5301 t = build2 (fd->loop.cond_code, boolean_type_node,
5302 DECL_P (v_back) && TREE_ADDRESSABLE (v_back)
5303 ? t : v_back, e);
5304 gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
5305
5306 /* Remove GIMPLE_OMP_CONTINUE. */
5307 gsi_remove (&si, true);
5308
5309 /* Trip update code goes into TRIP_UPDATE_BB. */
5310 si = gsi_start_bb (trip_update_bb);
5311
5312 t = build_int_cst (itype, 1);
5313 t = build2 (PLUS_EXPR, itype, trip_main, t);
5314 stmt = gimple_build_assign (trip_back, t);
5315 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
5316
5317 /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing. */
5318 si = gsi_last_bb (exit_bb);
5319 if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
5320 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
5321 false, GSI_SAME_STMT);
5322 gsi_remove (&si, true);
5323
5324 /* Connect the new blocks. */
5325 find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
5326 find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
5327
5328 find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
5329 find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
5330
5331 redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
5332
5333 if (gimple_in_ssa_p (cfun))
5334 {
5335 gimple_stmt_iterator psi;
5336 gimple phi;
5337 edge re, ene;
5338 edge_var_map_vector *head;
5339 edge_var_map *vm;
5340 size_t i;
5341
5342 /* When we redirect the edge from trip_update_bb to iter_part_bb, we
5343 remove arguments of the phi nodes in fin_bb. We need to create
5344 appropriate phi nodes in iter_part_bb instead. */
5345 se = single_pred_edge (fin_bb);
5346 re = single_succ_edge (trip_update_bb);
5347 head = redirect_edge_var_map_vector (re);
5348 ene = single_succ_edge (entry_bb);
5349
5350 psi = gsi_start_phis (fin_bb);
5351 for (i = 0; !gsi_end_p (psi) && head->iterate (i, &vm);
5352 gsi_next (&psi), ++i)
5353 {
5354 gimple nphi;
5355 source_location locus;
5356
5357 phi = gsi_stmt (psi);
5358 t = gimple_phi_result (phi);
5359 gcc_assert (t == redirect_edge_var_map_result (vm));
5360 nphi = create_phi_node (t, iter_part_bb);
5361
5362 t = PHI_ARG_DEF_FROM_EDGE (phi, se);
5363 locus = gimple_phi_arg_location_from_edge (phi, se);
5364
5365 /* A special case -- fd->loop.v is not yet computed in
5366 iter_part_bb, so we need to use v_extra instead. */
5367 if (t == fd->loop.v)
5368 t = v_extra;
5369 add_phi_arg (nphi, t, ene, locus);
5370 locus = redirect_edge_var_map_location (vm);
5371 add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
5372 }
5373 gcc_assert (!gsi_end_p (psi) && i == head->length ());
5374 redirect_edge_var_map_clear (re);
5375 while (1)
5376 {
5377 psi = gsi_start_phis (fin_bb);
5378 if (gsi_end_p (psi))
5379 break;
5380 remove_phi_node (&psi, false);
5381 }
5382
5383 /* Make phi node for trip. */
5384 phi = create_phi_node (trip_main, iter_part_bb);
5385 add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
5386 UNKNOWN_LOCATION);
5387 add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
5388 UNKNOWN_LOCATION);
5389 }
5390
5391 set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
5392 set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
5393 recompute_dominator (CDI_DOMINATORS, iter_part_bb));
5394 set_immediate_dominator (CDI_DOMINATORS, fin_bb,
5395 recompute_dominator (CDI_DOMINATORS, fin_bb));
5396 set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
5397 recompute_dominator (CDI_DOMINATORS, seq_start_bb));
5398 set_immediate_dominator (CDI_DOMINATORS, body_bb,
5399 recompute_dominator (CDI_DOMINATORS, body_bb));
5400
5401 struct loop *trip_loop = alloc_loop ();
5402 trip_loop->header = iter_part_bb;
5403 trip_loop->latch = trip_update_bb;
5404 add_loop (trip_loop, iter_part_bb->loop_father);
5405
5406 struct loop *loop = alloc_loop ();
5407 loop->header = body_bb;
5408 loop->latch = cont_bb;
5409 add_loop (loop, trip_loop);
5410 }
5411
5412 /* A subroutine of expand_omp_for. Generate code for a simd non-worksharing
5413 loop. Given parameters:
5414
5415 for (V = N1; V cond N2; V += STEP) BODY;
5416
5417 where COND is "<" or ">", we generate pseudocode
5418
5419 V = N1;
5420 goto L1;
5421 L0:
5422 BODY;
5423 V += STEP;
5424 L1:
5425 if (V cond N2) goto L0; else goto L2;
5426 L2:
5427
5428 For collapsed loops, given parameters:
5429 collapse(3)
5430 for (V1 = N11; V1 cond1 N12; V1 += STEP1)
5431 for (V2 = N21; V2 cond2 N22; V2 += STEP2)
5432 for (V3 = N31; V3 cond3 N32; V3 += STEP3)
5433 BODY;
5434
5435 we generate pseudocode
5436
5437 if (cond3 is <)
5438 adj = STEP3 - 1;
5439 else
5440 adj = STEP3 + 1;
5441 count3 = (adj + N32 - N31) / STEP3;
5442 if (cond2 is <)
5443 adj = STEP2 - 1;
5444 else
5445 adj = STEP2 + 1;
5446 count2 = (adj + N22 - N21) / STEP2;
5447 if (cond1 is <)
5448 adj = STEP1 - 1;
5449 else
5450 adj = STEP1 + 1;
5451 count1 = (adj + N12 - N11) / STEP1;
5452 count = count1 * count2 * count3;
5453 V = 0;
5454 V1 = N11;
5455 V2 = N21;
5456 V3 = N31;
5457 goto L1;
5458 L0:
5459 BODY;
5460 V += 1;
5461 V3 += STEP3;
5462 V2 += (V3 cond3 N32) ? 0 : STEP2;
5463 V3 = (V3 cond3 N32) ? V3 : N31;
5464 V1 += (V2 cond2 N22) ? 0 : STEP1;
5465 V2 = (V2 cond2 N22) ? V2 : N21;
5466 L1:
5467 if (V < count) goto L0; else goto L2;
5468 L2:
5469
5470 */
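/* A worked example of the collapsed form above (illustrative only;
   the names follow the pseudocode, not the generated temporaries).
   For

     #pragma omp simd collapse(2)
     for (i = 0; i < 4; i++)
       for (j = 0; j < 8; j++)
         body (i, j);

   we get count1 = 4, count2 = 8, count = 32 and a single linear
   counter V running from 0 to 31; the inner variable j is stepped on
   every iteration, while i advances (and j resets to 0) only when j
   runs past its bound.  */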
5471
5472 static void
5473 expand_omp_simd (struct omp_region *region, struct omp_for_data *fd)
5474 {
5475 tree type, t;
5476 basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, l2_bb, l2_dom_bb;
5477 gimple_stmt_iterator gsi;
5478 gimple stmt;
5479 bool broken_loop = region->cont == NULL;
5480 edge e, ne;
5481 tree *counts = NULL;
5482 int i;
5483 tree safelen = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5484 OMP_CLAUSE_SAFELEN);
5485 tree simduid = find_omp_clause (gimple_omp_for_clauses (fd->for_stmt),
5486 OMP_CLAUSE__SIMDUID_);
5487 tree n2;
5488
5489 type = TREE_TYPE (fd->loop.v);
5490 entry_bb = region->entry;
5491 cont_bb = region->cont;
5492 gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
5493 gcc_assert (broken_loop
5494 || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
5495 l0_bb = FALLTHRU_EDGE (entry_bb)->dest;
5496 if (!broken_loop)
5497 {
5498 gcc_assert (BRANCH_EDGE (cont_bb)->dest == l0_bb);
5499 gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
5500 l1_bb = split_block (cont_bb, last_stmt (cont_bb))->dest;
5501 l2_bb = BRANCH_EDGE (entry_bb)->dest;
5502 }
5503 else
5504 {
5505 BRANCH_EDGE (entry_bb)->flags &= ~EDGE_ABNORMAL;
5506 l1_bb = split_edge (BRANCH_EDGE (entry_bb));
5507 l2_bb = single_succ (l1_bb);
5508 }
5509 exit_bb = region->exit;
5510 l2_dom_bb = NULL;
5511
5512 gsi = gsi_last_bb (entry_bb);
5513
5514 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
5515 /* Not needed in SSA form right now. */
5516 gcc_assert (!gimple_in_ssa_p (cfun));
5517 if (fd->collapse > 1)
5518 {
5519 int first_zero_iter = -1;
5520 basic_block zero_iter_bb = l2_bb;
5521
5522 counts = XALLOCAVEC (tree, fd->collapse);
5523 expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
5524 zero_iter_bb, first_zero_iter,
5525 l2_dom_bb);
5526 }
5527 if (l2_dom_bb == NULL)
5528 l2_dom_bb = l1_bb;
5529
5530 n2 = fd->loop.n2;
5531 if (0)
5532 /* Place holder for gimple_omp_for_combined_into_p() in
5533 the upcoming gomp-4_0-branch merge. */;
5534 else
5535 {
5536 expand_omp_build_assign (&gsi, fd->loop.v,
5537 fold_convert (type, fd->loop.n1));
5538 if (fd->collapse > 1)
5539 for (i = 0; i < fd->collapse; i++)
5540 {
5541 tree itype = TREE_TYPE (fd->loops[i].v);
5542 if (POINTER_TYPE_P (itype))
5543 itype = signed_type_for (itype);
5544 t = fold_convert (TREE_TYPE (fd->loops[i].v), fd->loops[i].n1);
5545 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
5546 }
5547 }
5548
5549 /* Remove the GIMPLE_OMP_FOR statement. */
5550 gsi_remove (&gsi, true);
5551
5552 if (!broken_loop)
5553 {
5554 /* Code to control the increment goes in the CONT_BB. */
5555 gsi = gsi_last_bb (cont_bb);
5556 stmt = gsi_stmt (gsi);
5557 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
5558
5559 if (POINTER_TYPE_P (type))
5560 t = fold_build_pointer_plus (fd->loop.v, fd->loop.step);
5561 else
5562 t = fold_build2 (PLUS_EXPR, type, fd->loop.v, fd->loop.step);
5563 expand_omp_build_assign (&gsi, fd->loop.v, t);
5564
5565 if (fd->collapse > 1)
5566 {
5567 i = fd->collapse - 1;
5568 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
5569 {
5570 t = fold_convert (sizetype, fd->loops[i].step);
5571 t = fold_build_pointer_plus (fd->loops[i].v, t);
5572 }
5573 else
5574 {
5575 t = fold_convert (TREE_TYPE (fd->loops[i].v),
5576 fd->loops[i].step);
5577 t = fold_build2 (PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
5578 fd->loops[i].v, t);
5579 }
5580 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
5581
5582 for (i = fd->collapse - 1; i > 0; i--)
5583 {
5584 tree itype = TREE_TYPE (fd->loops[i].v);
5585 tree itype2 = TREE_TYPE (fd->loops[i - 1].v);
5586 if (POINTER_TYPE_P (itype2))
5587 itype2 = signed_type_for (itype2);
5588 t = build3 (COND_EXPR, itype2,
5589 build2 (fd->loops[i].cond_code, boolean_type_node,
5590 fd->loops[i].v,
5591 fold_convert (itype, fd->loops[i].n2)),
5592 build_int_cst (itype2, 0),
5593 fold_convert (itype2, fd->loops[i - 1].step));
5594 if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i - 1].v)))
5595 t = fold_build_pointer_plus (fd->loops[i - 1].v, t);
5596 else
5597 t = fold_build2 (PLUS_EXPR, itype2, fd->loops[i - 1].v, t);
5598 expand_omp_build_assign (&gsi, fd->loops[i - 1].v, t);
5599
5600 t = build3 (COND_EXPR, itype,
5601 build2 (fd->loops[i].cond_code, boolean_type_node,
5602 fd->loops[i].v,
5603 fold_convert (itype, fd->loops[i].n2)),
5604 fd->loops[i].v,
5605 fold_convert (itype, fd->loops[i].n1));
5606 expand_omp_build_assign (&gsi, fd->loops[i].v, t);
5607 }
5608 }
5609
5610 /* Remove GIMPLE_OMP_CONTINUE. */
5611 gsi_remove (&gsi, true);
5612 }
5613
5614 /* Emit the condition in L1_BB. */
5615 gsi = gsi_start_bb (l1_bb);
5616
5617 t = fold_convert (type, n2);
5618 t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
5619 false, GSI_CONTINUE_LINKING);
5620 t = build2 (fd->loop.cond_code, boolean_type_node, fd->loop.v, t);
5621 stmt = gimple_build_cond_empty (t);
5622 gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
5623 if (walk_tree (gimple_cond_lhs_ptr (stmt), expand_omp_regimplify_p,
5624 NULL, NULL)
5625 || walk_tree (gimple_cond_rhs_ptr (stmt), expand_omp_regimplify_p,
5626 NULL, NULL))
5627 {
5628 gsi = gsi_for_stmt (stmt);
5629 gimple_regimplify_operands (stmt, &gsi);
5630 }
5631
5632 /* Remove GIMPLE_OMP_RETURN. */
5633 gsi = gsi_last_bb (exit_bb);
5634 gsi_remove (&gsi, true);
5635
5636 /* Connect the new blocks. */
5637 remove_edge (FALLTHRU_EDGE (entry_bb));
5638
5639 if (!broken_loop)
5640 {
5641 remove_edge (BRANCH_EDGE (entry_bb));
5642 make_edge (entry_bb, l1_bb, EDGE_FALLTHRU);
5643
5644 e = BRANCH_EDGE (l1_bb);
5645 ne = FALLTHRU_EDGE (l1_bb);
5646 e->flags = EDGE_TRUE_VALUE;
5647 }
5648 else
5649 {
5650 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
5651
5652 ne = single_succ_edge (l1_bb);
5653 e = make_edge (l1_bb, l0_bb, EDGE_TRUE_VALUE);
5654
5655 }
5656 ne->flags = EDGE_FALSE_VALUE;
5657 e->probability = REG_BR_PROB_BASE * 7 / 8;
5658 ne->probability = REG_BR_PROB_BASE / 8;
5659
5660 set_immediate_dominator (CDI_DOMINATORS, l1_bb, entry_bb);
5661 set_immediate_dominator (CDI_DOMINATORS, l2_bb, l2_dom_bb);
5662 set_immediate_dominator (CDI_DOMINATORS, l0_bb, l1_bb);
5663
5664 if (!broken_loop)
5665 {
5666 struct loop *loop = alloc_loop ();
5667 loop->header = l1_bb;
5668 loop->latch = e->dest;
5669 add_loop (loop, l1_bb->loop_father);
5670 if (safelen == NULL_TREE)
5671 loop->safelen = INT_MAX;
5672 else
5673 {
5674 safelen = OMP_CLAUSE_SAFELEN_EXPR (safelen);
5675 if (!host_integerp (safelen, 1)
5676 || (unsigned HOST_WIDE_INT) tree_low_cst (safelen, 1)
5677 > INT_MAX)
5678 loop->safelen = INT_MAX;
5679 else
5680 loop->safelen = tree_low_cst (safelen, 1);
5681 if (loop->safelen == 1)
5682 loop->safelen = 0;
5683 }
5684 if (simduid)
5685 {
5686 loop->simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
5687 cfun->has_simduid_loops = true;
5688 }
5689 /* If not -fno-tree-loop-vectorize, hint that we want to vectorize
5690 the loop. */
5691 if ((flag_tree_loop_vectorize
5692 || (!global_options_set.x_flag_tree_loop_vectorize
5693 && !global_options_set.x_flag_tree_vectorize))
5694 && loop->safelen > 1)
5695 {
5696 loop->force_vect = true;
5697 cfun->has_force_vect_loops = true;
5698 }
5699 }
5700 }
5701
5702
5703 /* Expand the OpenMP loop defined by REGION. */
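/* A rough summary of the dispatch below (illustrative; the code is
   authoritative):

     #pragma omp simd ...                      -> expand_omp_simd
     schedule(static), no ordered, collapse(1) -> expand_omp_for_static_nochunk
     schedule(static, chunk), likewise         -> expand_omp_for_static_chunk
     everything else (dynamic, guided, runtime,
     ordered, collapse > 1)                    -> expand_omp_for_generic, via the
                                                  matching GOMP_loop_*_start/next
                                                  (or the *_ull_* variants for
                                                  unsigned long long iterators).  */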
5704
5705 static void
5706 expand_omp_for (struct omp_region *region)
5707 {
5708 struct omp_for_data fd;
5709 struct omp_for_data_loop *loops;
5710
5711 loops
5712 = (struct omp_for_data_loop *)
5713 alloca (gimple_omp_for_collapse (last_stmt (region->entry))
5714 * sizeof (struct omp_for_data_loop));
5715 extract_omp_for_data (last_stmt (region->entry), &fd, loops);
5716 region->sched_kind = fd.sched_kind;
5717
5718 gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
5719 BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
5720 FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
5721 if (region->cont)
5722 {
5723 gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
5724 BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
5725 FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
5726 }
5727 else
5728    /* If there isn't a continue then this is a degenerate case where
5729 the introduction of abnormal edges during lowering will prevent
5730 original loops from being detected. Fix that up. */
5731 loops_state_set (LOOPS_NEED_FIXUP);
5732
5733 if (gimple_omp_for_kind (fd.for_stmt) == GF_OMP_FOR_KIND_SIMD)
5734 expand_omp_simd (region, &fd);
5735 else if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
5736 && !fd.have_ordered
5737 && fd.collapse == 1
5738 && region->cont != NULL)
5739 {
5740 if (fd.chunk_size == NULL)
5741 expand_omp_for_static_nochunk (region, &fd);
5742 else
5743 expand_omp_for_static_chunk (region, &fd);
5744 }
5745 else
5746 {
5747 int fn_index, start_ix, next_ix;
5748
5749 gcc_assert (gimple_omp_for_kind (fd.for_stmt)
5750 == GF_OMP_FOR_KIND_FOR);
5751 if (fd.chunk_size == NULL
5752 && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
5753 fd.chunk_size = integer_zero_node;
5754 gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
5755 fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
5756 ? 3 : fd.sched_kind;
5757 fn_index += fd.have_ordered * 4;
5758 start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
5759 next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
5760 if (fd.iter_type == long_long_unsigned_type_node)
5761 {
5762 start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
5763 - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
5764 next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
5765 - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
5766 }
5767 expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
5768 (enum built_in_function) next_ix);
5769 }
5770
5771 if (gimple_in_ssa_p (cfun))
5772 update_ssa (TODO_update_ssa_only_virtuals);
5773 }
5774
5775
5776 /* Expand code for an OpenMP sections directive.  In pseudocode, we generate
5777
5778 v = GOMP_sections_start (n);
5779 L0:
5780 switch (v)
5781 {
5782 case 0:
5783 goto L2;
5784 case 1:
5785 section 1;
5786 goto L1;
5787 case 2:
5788 ...
5789 case n:
5790 ...
5791 default:
5792 abort ();
5793 }
5794 L1:
5795 v = GOMP_sections_next ();
5796 goto L0;
5797 L2:
5798 reduction;
5799
5800 If this is a combined parallel sections, replace the call to
5801 GOMP_sections_start with call to GOMP_sections_next. */
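/* For illustration, a directive such as

     #pragma omp sections
     {
       #pragma omp section
         foo ();
       #pragma omp section
         bar ();
     }

   yields switch cases 1 and 2 for the two section bodies, case 0 for
   "no more work", and GOMP_sections_start is called with n == 2, i.e.
   len - 1 below, since the successor count of L0 also includes the
   exit edge.  */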
5802
5803 static void
5804 expand_omp_sections (struct omp_region *region)
5805 {
5806 tree t, u, vin = NULL, vmain, vnext, l2;
5807 vec<tree> label_vec;
5808 unsigned len;
5809 basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
5810 gimple_stmt_iterator si, switch_si;
5811 gimple sections_stmt, stmt, cont;
5812 edge_iterator ei;
5813 edge e;
5814 struct omp_region *inner;
5815 unsigned i, casei;
5816 bool exit_reachable = region->cont != NULL;
5817
5818 gcc_assert (region->exit != NULL);
5819 entry_bb = region->entry;
5820 l0_bb = single_succ (entry_bb);
5821 l1_bb = region->cont;
5822 l2_bb = region->exit;
5823 if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
5824 l2 = gimple_block_label (l2_bb);
5825 else
5826 {
5827 /* This can happen if there are reductions. */
5828 len = EDGE_COUNT (l0_bb->succs);
5829 gcc_assert (len > 0);
5830 e = EDGE_SUCC (l0_bb, len - 1);
5831 si = gsi_last_bb (e->dest);
5832 l2 = NULL_TREE;
5833 if (gsi_end_p (si)
5834 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
5835 l2 = gimple_block_label (e->dest);
5836 else
5837 FOR_EACH_EDGE (e, ei, l0_bb->succs)
5838 {
5839 si = gsi_last_bb (e->dest);
5840 if (gsi_end_p (si)
5841 || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
5842 {
5843 l2 = gimple_block_label (e->dest);
5844 break;
5845 }
5846 }
5847 }
5848 if (exit_reachable)
5849 default_bb = create_empty_bb (l1_bb->prev_bb);
5850 else
5851 default_bb = create_empty_bb (l0_bb);
5852
5853 /* We will build a switch() with enough cases for all the
5854 GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
5855 and a default case to abort if something goes wrong. */
5856 len = EDGE_COUNT (l0_bb->succs);
5857
5858 /* Use vec::quick_push on label_vec throughout, since we know the size
5859 in advance. */
5860 label_vec.create (len);
5861
5862 /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
5863 GIMPLE_OMP_SECTIONS statement. */
5864 si = gsi_last_bb (entry_bb);
5865 sections_stmt = gsi_stmt (si);
5866 gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
5867 vin = gimple_omp_sections_control (sections_stmt);
5868 if (!is_combined_parallel (region))
5869 {
5870 /* If we are not inside a combined parallel+sections region,
5871 call GOMP_sections_start. */
5872 t = build_int_cst (unsigned_type_node, len - 1);
5873 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
5874 stmt = gimple_build_call (u, 1, t);
5875 }
5876 else
5877 {
5878 /* Otherwise, call GOMP_sections_next. */
5879 u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
5880 stmt = gimple_build_call (u, 0);
5881 }
5882 gimple_call_set_lhs (stmt, vin);
5883 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5884 gsi_remove (&si, true);
5885
5886 /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
5887 L0_BB. */
5888 switch_si = gsi_last_bb (l0_bb);
5889 gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
5890 if (exit_reachable)
5891 {
5892 cont = last_stmt (l1_bb);
5893 gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
5894 vmain = gimple_omp_continue_control_use (cont);
5895 vnext = gimple_omp_continue_control_def (cont);
5896 }
5897 else
5898 {
5899 vmain = vin;
5900 vnext = NULL_TREE;
5901 }
5902
5903 t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
5904 label_vec.quick_push (t);
5905 i = 1;
5906
5907 /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR. */
5908 for (inner = region->inner, casei = 1;
5909 inner;
5910 inner = inner->next, i++, casei++)
5911 {
5912 basic_block s_entry_bb, s_exit_bb;
5913
5914 /* Skip optional reduction region. */
5915 if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
5916 {
5917 --i;
5918 --casei;
5919 continue;
5920 }
5921
5922 s_entry_bb = inner->entry;
5923 s_exit_bb = inner->exit;
5924
5925 t = gimple_block_label (s_entry_bb);
5926 u = build_int_cst (unsigned_type_node, casei);
5927 u = build_case_label (u, NULL, t);
5928 label_vec.quick_push (u);
5929
5930 si = gsi_last_bb (s_entry_bb);
5931 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
5932 gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
5933 gsi_remove (&si, true);
5934 single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
5935
5936 if (s_exit_bb == NULL)
5937 continue;
5938
5939 si = gsi_last_bb (s_exit_bb);
5940 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
5941 gsi_remove (&si, true);
5942
5943 single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
5944 }
5945
5946 /* Error handling code goes in DEFAULT_BB. */
5947 t = gimple_block_label (default_bb);
5948 u = build_case_label (NULL, NULL, t);
5949 make_edge (l0_bb, default_bb, 0);
5950 if (current_loops)
5951 add_bb_to_loop (default_bb, current_loops->tree_root);
5952
5953 stmt = gimple_build_switch (vmain, u, label_vec);
5954 gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
5955 gsi_remove (&switch_si, true);
5956 label_vec.release ();
5957
5958 si = gsi_start_bb (default_bb);
5959 stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
5960 gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
5961
5962 if (exit_reachable)
5963 {
5964 tree bfn_decl;
5965
5966 /* Code to get the next section goes in L1_BB. */
5967 si = gsi_last_bb (l1_bb);
5968 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
5969
5970 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
5971 stmt = gimple_build_call (bfn_decl, 0);
5972 gimple_call_set_lhs (stmt, vnext);
5973 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5974 gsi_remove (&si, true);
5975
5976 single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
5977 }
5978
5979 /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB. */
5980 si = gsi_last_bb (l2_bb);
5981 if (gimple_omp_return_nowait_p (gsi_stmt (si)))
5982 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
5983 else
5984 t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
5985 stmt = gimple_build_call (t, 0);
5986 gsi_insert_after (&si, stmt, GSI_SAME_STMT);
5987 gsi_remove (&si, true);
5988
5989 set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
5990 }
5991
5992
5993 /* Expand code for an OpenMP single directive. We've already expanded
5994    much of the code; here we simply place the GOMP_barrier call.  */
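/* By way of example (a sketch of the overall effect, most of which was
   already produced by the lowering pass):

     #pragma omp single
       body ();

   has by now become "if (GOMP_single_start ()) body ();"; this routine
   only strips the entry/exit markers and appends GOMP_barrier () unless
   nowait was given (a copyprivate clause forces the barrier anyway).  */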
5995
5996 static void
5997 expand_omp_single (struct omp_region *region)
5998 {
5999 basic_block entry_bb, exit_bb;
6000 gimple_stmt_iterator si;
6001 bool need_barrier = false;
6002
6003 entry_bb = region->entry;
6004 exit_bb = region->exit;
6005
6006 si = gsi_last_bb (entry_bb);
6007 /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
6008 be removed. We need to ensure that the thread that entered the single
6009 does not exit before the data is copied out by the other threads. */
6010 if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
6011 OMP_CLAUSE_COPYPRIVATE))
6012 need_barrier = true;
6013 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
6014 gsi_remove (&si, true);
6015 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6016
6017 si = gsi_last_bb (exit_bb);
6018 if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
6019 force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
6020 false, GSI_SAME_STMT);
6021 gsi_remove (&si, true);
6022 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
6023 }
6024
6025
6026 /* Generic expansion for OpenMP synchronization directives: master,
6027 ordered and critical. All we need to do here is remove the entry
6028 and exit markers for REGION. */
6029
6030 static void
6031 expand_omp_synch (struct omp_region *region)
6032 {
6033 basic_block entry_bb, exit_bb;
6034 gimple_stmt_iterator si;
6035
6036 entry_bb = region->entry;
6037 exit_bb = region->exit;
6038
6039 si = gsi_last_bb (entry_bb);
6040 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
6041 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
6042 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
6043 || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
6044 gsi_remove (&si, true);
6045 single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
6046
6047 if (exit_bb)
6048 {
6049 si = gsi_last_bb (exit_bb);
6050 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
6051 gsi_remove (&si, true);
6052 single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
6053 }
6054 }
6055
6056 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
6057 operation as a normal volatile load. */
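/* Illustrative sketch: for

     #pragma omp atomic read
       v = x;

   with a 4-byte operand (INDEX == 2) this emits roughly

     v = __atomic_load_4 (&x, MEMMODEL_RELAXED);

   and returns false, so that a fallback expansion is tried, when the
   target does not provide the builtin.  */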
6058
6059 static bool
6060 expand_omp_atomic_load (basic_block load_bb, tree addr,
6061 tree loaded_val, int index)
6062 {
6063 enum built_in_function tmpbase;
6064 gimple_stmt_iterator gsi;
6065 basic_block store_bb;
6066 location_t loc;
6067 gimple stmt;
6068 tree decl, call, type, itype;
6069
6070 gsi = gsi_last_bb (load_bb);
6071 stmt = gsi_stmt (gsi);
6072 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
6073 loc = gimple_location (stmt);
6074
6075 /* ??? If the target does not implement atomic_load_optab[mode], and mode
6076 is smaller than word size, then expand_atomic_load assumes that the load
6077 is atomic. We could avoid the builtin entirely in this case. */
6078
6079 tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
6080 decl = builtin_decl_explicit (tmpbase);
6081 if (decl == NULL_TREE)
6082 return false;
6083
6084 type = TREE_TYPE (loaded_val);
6085 itype = TREE_TYPE (TREE_TYPE (decl));
6086
6087 call = build_call_expr_loc (loc, decl, 2, addr,
6088 build_int_cst (NULL, MEMMODEL_RELAXED));
6089 if (!useless_type_conversion_p (type, itype))
6090 call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
6091 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
6092
6093 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
6094 gsi_remove (&gsi, true);
6095
6096 store_bb = single_succ (load_bb);
6097 gsi = gsi_last_bb (store_bb);
6098 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
6099 gsi_remove (&gsi, true);
6100
6101 if (gimple_in_ssa_p (cfun))
6102 update_ssa (TODO_update_ssa_no_phi);
6103
6104 return true;
6105 }
6106
6107 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
6108 operation as a normal volatile store. */
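/* Illustrative sketch: for

     #pragma omp atomic write
       x = expr;

   with a 4-byte operand this emits roughly

     __atomic_store_4 (&x, expr, MEMMODEL_RELAXED);

   or, when the old value is also needed (atomic capture of a write),
   the __atomic_exchange_4 form instead.  */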
6109
6110 static bool
6111 expand_omp_atomic_store (basic_block load_bb, tree addr,
6112 tree loaded_val, tree stored_val, int index)
6113 {
6114 enum built_in_function tmpbase;
6115 gimple_stmt_iterator gsi;
6116 basic_block store_bb = single_succ (load_bb);
6117 location_t loc;
6118 gimple stmt;
6119 tree decl, call, type, itype;
6120 enum machine_mode imode;
6121 bool exchange;
6122
6123 gsi = gsi_last_bb (load_bb);
6124 stmt = gsi_stmt (gsi);
6125 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
6126
6127 /* If the load value is needed, then this isn't a store but an exchange. */
6128 exchange = gimple_omp_atomic_need_value_p (stmt);
6129
6130 gsi = gsi_last_bb (store_bb);
6131 stmt = gsi_stmt (gsi);
6132 gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
6133 loc = gimple_location (stmt);
6134
6135 /* ??? If the target does not implement atomic_store_optab[mode], and mode
6136 is smaller than word size, then expand_atomic_store assumes that the store
6137 is atomic. We could avoid the builtin entirely in this case. */
6138
6139 tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
6140 tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
6141 decl = builtin_decl_explicit (tmpbase);
6142 if (decl == NULL_TREE)
6143 return false;
6144
6145 type = TREE_TYPE (stored_val);
6146
6147 /* Dig out the type of the function's second argument. */
6148 itype = TREE_TYPE (decl);
6149 itype = TYPE_ARG_TYPES (itype);
6150 itype = TREE_CHAIN (itype);
6151 itype = TREE_VALUE (itype);
6152 imode = TYPE_MODE (itype);
6153
6154 if (exchange && !can_atomic_exchange_p (imode, true))
6155 return false;
6156
6157 if (!useless_type_conversion_p (itype, type))
6158 stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
6159 call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
6160 build_int_cst (NULL, MEMMODEL_RELAXED));
6161 if (exchange)
6162 {
6163 if (!useless_type_conversion_p (type, itype))
6164 call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
6165 call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
6166 }
6167
6168 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
6169 gsi_remove (&gsi, true);
6170
6171 /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above. */
6172 gsi = gsi_last_bb (load_bb);
6173 gsi_remove (&gsi, true);
6174
6175 if (gimple_in_ssa_p (cfun))
6176 update_ssa (TODO_update_ssa_no_phi);
6177
6178 return true;
6179 }
6180
6181 /* A subroutine of expand_omp_atomic. Attempt to implement the atomic
6182 operation as a __atomic_fetch_op builtin. INDEX is log2 of the
6183 size of the data type, and thus usable to find the index of the builtin
6184 decl. Returns false if the expression is not of the proper form. */
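/* For example (a sketch), for

     #pragma omp atomic
       x += n;

   with a 4-byte integer x (INDEX == 2) this routine recognizes the
   update and emits roughly

     __atomic_fetch_add_4 (&x, n, MEMMODEL_RELAXED);

   using the *_ADD_FETCH_* variant instead when the updated value is
   needed by an atomic capture.  */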
6185
6186 static bool
6187 expand_omp_atomic_fetch_op (basic_block load_bb,
6188 tree addr, tree loaded_val,
6189 tree stored_val, int index)
6190 {
6191 enum built_in_function oldbase, newbase, tmpbase;
6192 tree decl, itype, call;
6193 tree lhs, rhs;
6194 basic_block store_bb = single_succ (load_bb);
6195 gimple_stmt_iterator gsi;
6196 gimple stmt;
6197 location_t loc;
6198 enum tree_code code;
6199 bool need_old, need_new;
6200 enum machine_mode imode;
6201
6202 /* We expect to find the following sequences:
6203
6204 load_bb:
6205 GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
6206
6207 store_bb:
6208 val = tmp OP something; (or: something OP tmp)
6209 GIMPLE_OMP_STORE (val)
6210
6211 ???FIXME: Allow a more flexible sequence.
6212 Perhaps use data flow to pick the statements.
6213
6214 */
6215
6216 gsi = gsi_after_labels (store_bb);
6217 stmt = gsi_stmt (gsi);
6218 loc = gimple_location (stmt);
6219 if (!is_gimple_assign (stmt))
6220 return false;
6221 gsi_next (&gsi);
6222 if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
6223 return false;
6224 need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
6225 need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
6226 gcc_checking_assert (!need_old || !need_new);
6227
6228 if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
6229 return false;
6230
6231 /* Check for one of the supported fetch-op operations. */
6232 code = gimple_assign_rhs_code (stmt);
6233 switch (code)
6234 {
6235 case PLUS_EXPR:
6236 case POINTER_PLUS_EXPR:
6237 oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
6238 newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
6239 break;
6240 case MINUS_EXPR:
6241 oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
6242 newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
6243 break;
6244 case BIT_AND_EXPR:
6245 oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
6246 newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
6247 break;
6248 case BIT_IOR_EXPR:
6249 oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
6250 newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
6251 break;
6252 case BIT_XOR_EXPR:
6253 oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
6254 newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
6255 break;
6256 default:
6257 return false;
6258 }
6259
6260 /* Make sure the expression is of the proper form. */
6261 if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
6262 rhs = gimple_assign_rhs2 (stmt);
6263 else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
6264 && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
6265 rhs = gimple_assign_rhs1 (stmt);
6266 else
6267 return false;
6268
6269 tmpbase = ((enum built_in_function)
6270 ((need_new ? newbase : oldbase) + index + 1));
6271 decl = builtin_decl_explicit (tmpbase);
6272 if (decl == NULL_TREE)
6273 return false;
6274 itype = TREE_TYPE (TREE_TYPE (decl));
6275 imode = TYPE_MODE (itype);
6276
6277 /* We could test all of the various optabs involved, but the fact of the
6278 matter is that (with the exception of i486 vs i586 and xadd) all targets
6279      that support any atomic operation optab also implement compare-and-swap.
6280 Let optabs.c take care of expanding any compare-and-swap loop. */
6281 if (!can_compare_and_swap_p (imode, true))
6282 return false;
6283
6284 gsi = gsi_last_bb (load_bb);
6285 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
6286
6287 /* OpenMP does not imply any barrier-like semantics on its atomic ops.
6288 It only requires that the operation happen atomically. Thus we can
6289 use the RELAXED memory model. */
6290 call = build_call_expr_loc (loc, decl, 3, addr,
6291 fold_convert_loc (loc, itype, rhs),
6292 build_int_cst (NULL, MEMMODEL_RELAXED));
6293
6294 if (need_old || need_new)
6295 {
6296 lhs = need_old ? loaded_val : stored_val;
6297 call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
6298 call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
6299 }
6300 else
6301 call = fold_convert_loc (loc, void_type_node, call);
6302 force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
6303 gsi_remove (&gsi, true);
6304
6305 gsi = gsi_last_bb (store_bb);
6306 gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
6307 gsi_remove (&gsi, true);
6308 gsi = gsi_last_bb (store_bb);
6309 gsi_remove (&gsi, true);
6310
6311 if (gimple_in_ssa_p (cfun))
6312 update_ssa (TODO_update_ssa_no_phi);
6313
6314 return true;
6315 }
6316
6317 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
6318
6319 oldval = *addr;
6320 repeat:
6321 newval = rhs; // with oldval replacing *addr in rhs
6322 oldval = __sync_val_compare_and_swap (addr, oldval, newval);
6323 if (oldval != newval)
6324 goto repeat;
6325
6326 INDEX is log2 of the size of the data type, and thus usable to find the
6327 index of the builtin decl. */
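/* For a floating-point operand the loop runs on a same-sized integer
   view of the location, e.g. (sketch only) for

     #pragma omp atomic
       f += 1.0f;

   the initial load and the compare-and-swap use an unsigned int view of
   f, the new value is VIEW_CONVERTed back and forth, and
   __sync_val_compare_and_swap_4 is retried until the value it returns
   matches the expected operand, i.e. the swap succeeded.  */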
6328
6329 static bool
6330 expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
6331 tree addr, tree loaded_val, tree stored_val,
6332 int index)
6333 {
6334 tree loadedi, storedi, initial, new_storedi, old_vali;
6335 tree type, itype, cmpxchg, iaddr;
6336 gimple_stmt_iterator si;
6337 basic_block loop_header = single_succ (load_bb);
6338 gimple phi, stmt;
6339 edge e;
6340 enum built_in_function fncode;
6341
6342 /* ??? We need a non-pointer interface to __atomic_compare_exchange in
6343 order to use the RELAXED memory model effectively. */
6344 fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
6345 + index + 1);
6346 cmpxchg = builtin_decl_explicit (fncode);
6347 if (cmpxchg == NULL_TREE)
6348 return false;
6349 type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
6350 itype = TREE_TYPE (TREE_TYPE (cmpxchg));
6351
6352 if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
6353 return false;
6354
6355 /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD. */
6356 si = gsi_last_bb (load_bb);
6357 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
6358
6359 /* For floating-point values, we'll need to view-convert them to integers
6360 so that we can perform the atomic compare and swap. Simplify the
6361 following code by always setting up the "i"ntegral variables. */
6362 if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
6363 {
6364 tree iaddr_val;
6365
6366 iaddr = create_tmp_reg (build_pointer_type_for_mode (itype, ptr_mode,
6367 true), NULL);
6368 iaddr_val
6369 = force_gimple_operand_gsi (&si,
6370 fold_convert (TREE_TYPE (iaddr), addr),
6371 false, NULL_TREE, true, GSI_SAME_STMT);
6372 stmt = gimple_build_assign (iaddr, iaddr_val);
6373 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6374 loadedi = create_tmp_var (itype, NULL);
6375 if (gimple_in_ssa_p (cfun))
6376 loadedi = make_ssa_name (loadedi, NULL);
6377 }
6378 else
6379 {
6380 iaddr = addr;
6381 loadedi = loaded_val;
6382 }
6383
6384 initial
6385 = force_gimple_operand_gsi (&si,
6386 build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
6387 iaddr,
6388 build_int_cst (TREE_TYPE (iaddr), 0)),
6389 true, NULL_TREE, true, GSI_SAME_STMT);
6390
6391 /* Move the value to the LOADEDI temporary. */
6392 if (gimple_in_ssa_p (cfun))
6393 {
6394 gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
6395 phi = create_phi_node (loadedi, loop_header);
6396 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
6397 initial);
6398 }
6399 else
6400 gsi_insert_before (&si,
6401 gimple_build_assign (loadedi, initial),
6402 GSI_SAME_STMT);
6403 if (loadedi != loaded_val)
6404 {
6405 gimple_stmt_iterator gsi2;
6406 tree x;
6407
6408 x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
6409 gsi2 = gsi_start_bb (loop_header);
6410 if (gimple_in_ssa_p (cfun))
6411 {
6412 gimple stmt;
6413 x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
6414 true, GSI_SAME_STMT);
6415 stmt = gimple_build_assign (loaded_val, x);
6416 gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
6417 }
6418 else
6419 {
6420 x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
6421 force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
6422 true, GSI_SAME_STMT);
6423 }
6424 }
6425 gsi_remove (&si, true);
6426
6427 si = gsi_last_bb (store_bb);
6428 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
6429
6430 if (iaddr == addr)
6431 storedi = stored_val;
6432 else
6433 storedi =
6434 force_gimple_operand_gsi (&si,
6435 build1 (VIEW_CONVERT_EXPR, itype,
6436 stored_val), true, NULL_TREE, true,
6437 GSI_SAME_STMT);
6438
6439 /* Build the compare&swap statement. */
6440 new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
6441 new_storedi = force_gimple_operand_gsi (&si,
6442 fold_convert (TREE_TYPE (loadedi),
6443 new_storedi),
6444 true, NULL_TREE,
6445 true, GSI_SAME_STMT);
6446
6447 if (gimple_in_ssa_p (cfun))
6448 old_vali = loadedi;
6449 else
6450 {
6451 old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
6452 stmt = gimple_build_assign (old_vali, loadedi);
6453 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6454
6455 stmt = gimple_build_assign (loadedi, new_storedi);
6456 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6457 }
6458
6459 /* Note that we always perform the comparison as an integer, even for
6460 floating point. This allows the atomic operation to properly
6461 succeed even with NaNs and -0.0. */
6462 stmt = gimple_build_cond_empty
6463 (build2 (NE_EXPR, boolean_type_node,
6464 new_storedi, old_vali));
6465 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6466
6467 /* Update cfg. */
6468 e = single_succ_edge (store_bb);
6469 e->flags &= ~EDGE_FALLTHRU;
6470 e->flags |= EDGE_FALSE_VALUE;
6471
6472 e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
6473
6474 /* Copy the new value to loadedi (we already did that before the condition
6475 if we are not in SSA). */
6476 if (gimple_in_ssa_p (cfun))
6477 {
6478 phi = gimple_seq_first_stmt (phi_nodes (loop_header));
6479 SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
6480 }
6481
6482 /* Remove GIMPLE_OMP_ATOMIC_STORE. */
6483 gsi_remove (&si, true);
6484
6485 struct loop *loop = alloc_loop ();
6486 loop->header = loop_header;
6487 loop->latch = store_bb;
6488 add_loop (loop, loop_header->loop_father);
6489
6490 if (gimple_in_ssa_p (cfun))
6491 update_ssa (TODO_update_ssa_no_phi);
6492
6493 return true;
6494 }
6495
6496 /* A subroutine of expand_omp_atomic. Implement the atomic operation as:
6497
6498 GOMP_atomic_start ();
6499 *addr = rhs;
6500 GOMP_atomic_end ();
6501
6502 The result is not globally atomic, but works so long as all parallel
6503 references are within #pragma omp atomic directives. According to
6504 responses received from omp@openmp.org, appears to be within spec.
6505 Which makes sense, since that's how several other compilers handle
6506 this situation as well.
6507 LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
6508 expanding. STORED_VAL is the operand of the matching
6509 GIMPLE_OMP_ATOMIC_STORE.
6510
6511 We replace
6512 GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
6513 loaded_val = *addr;
6514
6515 and replace
6516 GIMPLE_OMP_ATOMIC_STORE (stored_val) with
6517 *addr = stored_val;
6518 */
6519
6520 static bool
6521 expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
6522 tree addr, tree loaded_val, tree stored_val)
6523 {
6524 gimple_stmt_iterator si;
6525 gimple stmt;
6526 tree t;
6527
6528 si = gsi_last_bb (load_bb);
6529 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
6530
6531 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
6532 t = build_call_expr (t, 0);
6533 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
6534
6535 stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
6536 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6537 gsi_remove (&si, true);
6538
6539 si = gsi_last_bb (store_bb);
6540 gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
6541
6542 stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
6543 stored_val);
6544 gsi_insert_before (&si, stmt, GSI_SAME_STMT);
6545
6546 t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
6547 t = build_call_expr (t, 0);
6548 force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
6549 gsi_remove (&si, true);
6550
6551 if (gimple_in_ssa_p (cfun))
6552 update_ssa (TODO_update_ssa_no_phi);
6553 return true;
6554 }
6555
6556 /* Expand a GIMPLE_OMP_ATOMIC statement.  We try to expand
6557 using expand_omp_atomic_fetch_op. If it failed, we try to
6558 call expand_omp_atomic_pipeline, and if it fails too, the
6559 ultimate fallback is wrapping the operation in a mutex
6560 (expand_omp_atomic_mutex). REGION is the atomic region built
6561 by build_omp_regions_1(). */
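/* Summarizing the strategy below (illustrative): plain atomic reads and
   writes go through expand_omp_atomic_load / expand_omp_atomic_store
   first, simple updates through the __atomic_fetch_OP builtins, then a
   compare-and-swap loop; only when none of those apply (unsupported
   operand size, insufficient alignment, or no compare-and-swap support)
   do we fall back to the GOMP_atomic_start / GOMP_atomic_end mutex.  */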
6562
6563 static void
6564 expand_omp_atomic (struct omp_region *region)
6565 {
6566 basic_block load_bb = region->entry, store_bb = region->exit;
6567 gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
6568 tree loaded_val = gimple_omp_atomic_load_lhs (load);
6569 tree addr = gimple_omp_atomic_load_rhs (load);
6570 tree stored_val = gimple_omp_atomic_store_val (store);
6571 tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
6572 HOST_WIDE_INT index;
6573
6574 /* Make sure the type is one of the supported sizes. */
6575 index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
6576 index = exact_log2 (index);
6577 if (index >= 0 && index <= 4)
6578 {
6579 unsigned int align = TYPE_ALIGN_UNIT (type);
6580
6581 /* __sync builtins require strict data alignment. */
6582 if (exact_log2 (align) >= index)
6583 {
6584 /* Atomic load. */
6585 if (loaded_val == stored_val
6586 && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
6587 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
6588 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
6589 && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
6590 return;
6591
6592 /* Atomic store. */
6593 if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
6594 || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
6595 && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
6596 && store_bb == single_succ (load_bb)
6597 && first_stmt (store_bb) == store
6598 && expand_omp_atomic_store (load_bb, addr, loaded_val,
6599 stored_val, index))
6600 return;
6601
6602 /* When possible, use specialized atomic update functions. */
6603 if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
6604 && store_bb == single_succ (load_bb)
6605 && expand_omp_atomic_fetch_op (load_bb, addr,
6606 loaded_val, stored_val, index))
6607 return;
6608
6609      /* If we don't have specialized __sync builtins, try to implement
6610         this as a compare-and-swap loop.  */
6611 if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
6612 loaded_val, stored_val, index))
6613 return;
6614 }
6615 }
6616
6617 /* The ultimate fallback is wrapping the operation in a mutex. */
6618 expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
6619 }
6620
6621
6622 /* Expand the parallel region tree rooted at REGION. Expansion
6623 proceeds in depth-first order. Innermost regions are expanded
6624 first. This way, parallel regions that require a new function to
6625 be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
6626 internal dependencies in their body. */
6627
6628 static void
6629 expand_omp (struct omp_region *region)
6630 {
6631 while (region)
6632 {
6633 location_t saved_location;
6634
6635 /* First, determine whether this is a combined parallel+workshare
6636 region. */
6637 if (region->type == GIMPLE_OMP_PARALLEL)
6638 determine_parallel_type (region);
6639
6640 if (region->inner)
6641 expand_omp (region->inner);
6642
6643 saved_location = input_location;
6644 if (gimple_has_location (last_stmt (region->entry)))
6645 input_location = gimple_location (last_stmt (region->entry));
6646
6647 switch (region->type)
6648 {
6649 case GIMPLE_OMP_PARALLEL:
6650 case GIMPLE_OMP_TASK:
6651 expand_omp_taskreg (region);
6652 break;
6653
6654 case GIMPLE_OMP_FOR:
6655 expand_omp_for (region);
6656 break;
6657
6658 case GIMPLE_OMP_SECTIONS:
6659 expand_omp_sections (region);
6660 break;
6661
6662 case GIMPLE_OMP_SECTION:
6663 /* Individual omp sections are handled together with their
6664 parent GIMPLE_OMP_SECTIONS region. */
6665 break;
6666
6667 case GIMPLE_OMP_SINGLE:
6668 expand_omp_single (region);
6669 break;
6670
6671 case GIMPLE_OMP_MASTER:
6672 case GIMPLE_OMP_ORDERED:
6673 case GIMPLE_OMP_CRITICAL:
6674 expand_omp_synch (region);
6675 break;
6676
6677 case GIMPLE_OMP_ATOMIC_LOAD:
6678 expand_omp_atomic (region);
6679 break;
6680
6681 default:
6682 gcc_unreachable ();
6683 }
6684
6685 input_location = saved_location;
6686 region = region->next;
6687 }
6688 }
6689
6690
6691 /* Helper for build_omp_regions. Scan the dominator tree starting at
6692 block BB. PARENT is the region that contains BB. If SINGLE_TREE is
6693    true, the function ends once a single tree is built (otherwise, the whole
6694 forest of OMP constructs may be built). */
6695
6696 static void
6697 build_omp_regions_1 (basic_block bb, struct omp_region *parent,
6698 bool single_tree)
6699 {
6700 gimple_stmt_iterator gsi;
6701 gimple stmt;
6702 basic_block son;
6703
6704 gsi = gsi_last_bb (bb);
6705 if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
6706 {
6707 struct omp_region *region;
6708 enum gimple_code code;
6709
6710 stmt = gsi_stmt (gsi);
6711 code = gimple_code (stmt);
6712 if (code == GIMPLE_OMP_RETURN)
6713 {
6714 /* STMT is the return point out of region PARENT. Mark it
6715 as the exit point and make PARENT the immediately
6716 enclosing region. */
6717 gcc_assert (parent);
6718 region = parent;
6719 region->exit = bb;
6720 parent = parent->outer;
6721 }
6722 else if (code == GIMPLE_OMP_ATOMIC_STORE)
6723 {
6724       /* GIMPLE_OMP_ATOMIC_STORE is analogous to
6725 GIMPLE_OMP_RETURN, but matches with
6726 GIMPLE_OMP_ATOMIC_LOAD. */
6727 gcc_assert (parent);
6728 gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
6729 region = parent;
6730 region->exit = bb;
6731 parent = parent->outer;
6732 }
6733
6734 else if (code == GIMPLE_OMP_CONTINUE)
6735 {
6736 gcc_assert (parent);
6737 parent->cont = bb;
6738 }
6739 else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
6740 {
6741 /* GIMPLE_OMP_SECTIONS_SWITCH is part of
6742 GIMPLE_OMP_SECTIONS, and we do nothing for it. */
6743 ;
6744 }
6745 else
6746 {
6747 /* Otherwise, this directive becomes the parent for a new
6748 region. */
6749 region = new_omp_region (bb, code, parent);
6750 parent = region;
6751 }
6752 }
6753
6754 if (single_tree && !parent)
6755 return;
6756
6757 for (son = first_dom_son (CDI_DOMINATORS, bb);
6758 son;
6759 son = next_dom_son (CDI_DOMINATORS, son))
6760 build_omp_regions_1 (son, parent, single_tree);
6761 }
6762
6763 /* Builds the tree of OMP regions rooted at ROOT, storing it to
6764 root_omp_region. */
6765
6766 static void
6767 build_omp_regions_root (basic_block root)
6768 {
6769 gcc_assert (root_omp_region == NULL);
6770 build_omp_regions_1 (root, NULL, true);
6771 gcc_assert (root_omp_region != NULL);
6772 }
6773
6774 /* Expand the OMP construct (and its subconstructs) starting in HEAD.  */
6775
6776 void
6777 omp_expand_local (basic_block head)
6778 {
6779 build_omp_regions_root (head);
6780 if (dump_file && (dump_flags & TDF_DETAILS))
6781 {
6782 fprintf (dump_file, "\nOMP region tree\n\n");
6783 dump_omp_region (dump_file, root_omp_region, 0);
6784 fprintf (dump_file, "\n");
6785 }
6786
6787 remove_exit_barriers (root_omp_region);
6788 expand_omp (root_omp_region);
6789
6790 free_omp_regions ();
6791 }
6792
6793 /* Scan the CFG and build a tree of OMP regions, storing the root of
6794    the tree in root_omp_region.  */
6795
6796 static void
6797 build_omp_regions (void)
6798 {
6799 gcc_assert (root_omp_region == NULL);
6800 calculate_dominance_info (CDI_DOMINATORS);
6801 build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
6802 }
6803
6804 /* Main entry point for expanding OMP-GIMPLE into runtime calls. */
6805
6806 static unsigned int
6807 execute_expand_omp (void)
6808 {
6809 build_omp_regions ();
6810
6811 if (!root_omp_region)
6812 return 0;
6813
6814 if (dump_file)
6815 {
6816 fprintf (dump_file, "\nOMP region tree\n\n");
6817 dump_omp_region (dump_file, root_omp_region, 0);
6818 fprintf (dump_file, "\n");
6819 }
6820
6821 remove_exit_barriers (root_omp_region);
6822
6823 expand_omp (root_omp_region);
6824
6825 cleanup_tree_cfg ();
6826
6827 free_omp_regions ();
6828
6829 return 0;
6830 }
6831
6832 /* OMP expansion -- the default pass, run before creation of SSA form. */
6833
6834 static bool
6835 gate_expand_omp (void)
6836 {
6837 return (flag_openmp != 0 && !seen_error ());
6838 }
6839
6840 namespace {
6841
6842 const pass_data pass_data_expand_omp =
6843 {
6844 GIMPLE_PASS, /* type */
6845 "ompexp", /* name */
6846 OPTGROUP_NONE, /* optinfo_flags */
6847 true, /* has_gate */
6848 true, /* has_execute */
6849 TV_NONE, /* tv_id */
6850 PROP_gimple_any, /* properties_required */
6851 0, /* properties_provided */
6852 0, /* properties_destroyed */
6853 0, /* todo_flags_start */
6854 0, /* todo_flags_finish */
6855 };
6856
6857 class pass_expand_omp : public gimple_opt_pass
6858 {
6859 public:
6860 pass_expand_omp(gcc::context *ctxt)
6861 : gimple_opt_pass(pass_data_expand_omp, ctxt)
6862 {}
6863
6864 /* opt_pass methods: */
6865 bool gate () { return gate_expand_omp (); }
6866 unsigned int execute () { return execute_expand_omp (); }
6867
6868 }; // class pass_expand_omp
6869
6870 } // anon namespace
6871
6872 gimple_opt_pass *
6873 make_pass_expand_omp (gcc::context *ctxt)
6874 {
6875 return new pass_expand_omp (ctxt);
6876 }
6877 \f
6878 /* Routines to lower OpenMP directives into OMP-GIMPLE. */
6879
6880 /* Lower the OpenMP sections directive in the current statement in GSI_P.
6881 CTX is the enclosing OMP context for the current statement. */
6882
6883 static void
6884 lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6885 {
6886 tree block, control;
6887 gimple_stmt_iterator tgsi;
6888 gimple stmt, new_stmt, bind, t;
6889 gimple_seq ilist, dlist, olist, new_body;
6890 struct gimplify_ctx gctx;
6891
6892 stmt = gsi_stmt (*gsi_p);
6893
6894 push_gimplify_context (&gctx);
6895
6896 dlist = NULL;
6897 ilist = NULL;
6898 lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
6899 &ilist, &dlist, ctx);
6900
6901 new_body = gimple_omp_body (stmt);
6902 gimple_omp_set_body (stmt, NULL);
6903 tgsi = gsi_start (new_body);
6904 for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
6905 {
6906 omp_context *sctx;
6907 gimple sec_start;
6908
6909 sec_start = gsi_stmt (tgsi);
6910 sctx = maybe_lookup_ctx (sec_start);
6911 gcc_assert (sctx);
6912
6913 lower_omp (gimple_omp_body_ptr (sec_start), sctx);
6914 gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
6915 GSI_CONTINUE_LINKING);
6916 gimple_omp_set_body (sec_start, NULL);
6917
6918 if (gsi_one_before_end_p (tgsi))
6919 {
6920 gimple_seq l = NULL;
6921 lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
6922 &l, ctx);
6923 gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
6924 gimple_omp_section_set_last (sec_start);
6925 }
6926
6927 gsi_insert_after (&tgsi, gimple_build_omp_return (false),
6928 GSI_CONTINUE_LINKING);
6929 }
6930
6931 block = make_node (BLOCK);
6932 bind = gimple_build_bind (NULL, new_body, block);
6933
6934 olist = NULL;
6935 lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
6936
6937 block = make_node (BLOCK);
6938 new_stmt = gimple_build_bind (NULL, NULL, block);
6939 gsi_replace (gsi_p, new_stmt, true);
6940
6941 pop_gimplify_context (new_stmt);
6942 gimple_bind_append_vars (new_stmt, ctx->block_vars);
6943 BLOCK_VARS (block) = gimple_bind_vars (bind);
6944 if (BLOCK_VARS (block))
6945 TREE_USED (block) = 1;
6946
6947 new_body = NULL;
6948 gimple_seq_add_seq (&new_body, ilist);
6949 gimple_seq_add_stmt (&new_body, stmt);
6950 gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
6951 gimple_seq_add_stmt (&new_body, bind);
6952
6953 control = create_tmp_var (unsigned_type_node, ".section");
6954 t = gimple_build_omp_continue (control, control);
6955 gimple_omp_sections_set_control (stmt, control);
6956 gimple_seq_add_stmt (&new_body, t);
6957
6958 gimple_seq_add_seq (&new_body, olist);
6959 gimple_seq_add_seq (&new_body, dlist);
6960
6961 new_body = maybe_catch_exception (new_body);
6962
6963 t = gimple_build_omp_return
6964 (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
6965 OMP_CLAUSE_NOWAIT));
6966 gimple_seq_add_stmt (&new_body, t);
6967
6968 gimple_bind_set_body (new_stmt, new_body);
6969 }
6970
6971
6972 /* A subroutine of lower_omp_single. Expand the simple form of
6973 a GIMPLE_OMP_SINGLE, without a copyprivate clause:
6974
6975 if (GOMP_single_start ())
6976 BODY;
6977 [ GOMP_barrier (); ] -> unless 'nowait' is present.
6978
6979 FIXME. It may be better to delay expanding the logic of this until
6980 pass_expand_omp. The expanded logic may make the job more difficult
6981    for a synchronization analysis pass.  */
6982
6983 static void
6984 lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
6985 {
6986 location_t loc = gimple_location (single_stmt);
6987 tree tlabel = create_artificial_label (loc);
6988 tree flabel = create_artificial_label (loc);
6989 gimple call, cond;
6990 tree lhs, decl;
6991
6992 decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
6993 lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
6994 call = gimple_build_call (decl, 0);
6995 gimple_call_set_lhs (call, lhs);
6996 gimple_seq_add_stmt (pre_p, call);
6997
6998 cond = gimple_build_cond (EQ_EXPR, lhs,
6999 fold_convert_loc (loc, TREE_TYPE (lhs),
7000 boolean_true_node),
7001 tlabel, flabel);
7002 gimple_seq_add_stmt (pre_p, cond);
7003 gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
7004 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
7005 gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
7006 }
7007
7008
7009 /* A subroutine of lower_omp_single. Expand the simple form of
7010 a GIMPLE_OMP_SINGLE, with a copyprivate clause:
7011
7012 #pragma omp single copyprivate (a, b, c)
7013
7014 Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
7015
7016 {
7017 if ((copyout_p = GOMP_single_copy_start ()) == NULL)
7018 {
7019 BODY;
7020 copyout.a = a;
7021 copyout.b = b;
7022 copyout.c = c;
7023 GOMP_single_copy_end (&copyout);
7024 }
7025 else
7026 {
7027 a = copyout_p->a;
7028 b = copyout_p->b;
7029 c = copyout_p->c;
7030 }
7031 GOMP_barrier ();
7032 }
7033
7034 FIXME. It may be better to delay expanding the logic of this until
7035 pass_expand_omp. The expanded logic may make the job more difficult
7036    for a synchronization analysis pass.  */
7037
7038 static void
7039 lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
7040 {
7041 tree ptr_type, t, l0, l1, l2, bfn_decl;
7042 gimple_seq copyin_seq;
7043 location_t loc = gimple_location (single_stmt);
7044
7045 ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
7046
7047 ptr_type = build_pointer_type (ctx->record_type);
7048 ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
7049
7050 l0 = create_artificial_label (loc);
7051 l1 = create_artificial_label (loc);
7052 l2 = create_artificial_label (loc);
7053
7054 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
7055 t = build_call_expr_loc (loc, bfn_decl, 0);
7056 t = fold_convert_loc (loc, ptr_type, t);
7057 gimplify_assign (ctx->receiver_decl, t, pre_p);
7058
7059 t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
7060 build_int_cst (ptr_type, 0));
7061 t = build3 (COND_EXPR, void_type_node, t,
7062 build_and_jump (&l0), build_and_jump (&l1));
7063 gimplify_and_add (t, pre_p);
7064
7065 gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
7066
7067 gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
7068
7069 copyin_seq = NULL;
7070 lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
7071 &copyin_seq, ctx);
7072
7073 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
7074 bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
7075 t = build_call_expr_loc (loc, bfn_decl, 1, t);
7076 gimplify_and_add (t, pre_p);
7077
7078 t = build_and_jump (&l2);
7079 gimplify_and_add (t, pre_p);
7080
7081 gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
7082
7083 gimple_seq_add_seq (pre_p, copyin_seq);
7084
7085 gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
7086 }
7087
7088
7089 /* Expand code for an OpenMP single directive. */
7090
7091 static void
7092 lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7093 {
7094 tree block;
7095 gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
7096 gimple_seq bind_body, dlist;
7097 struct gimplify_ctx gctx;
7098
7099 push_gimplify_context (&gctx);
7100
7101 block = make_node (BLOCK);
7102 bind = gimple_build_bind (NULL, NULL, block);
7103 gsi_replace (gsi_p, bind, true);
7104 bind_body = NULL;
7105 dlist = NULL;
7106 lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
7107 &bind_body, &dlist, ctx);
7108 lower_omp (gimple_omp_body_ptr (single_stmt), ctx);
7109
7110 gimple_seq_add_stmt (&bind_body, single_stmt);
7111
7112 if (ctx->record_type)
7113 lower_omp_single_copy (single_stmt, &bind_body, ctx);
7114 else
7115 lower_omp_single_simple (single_stmt, &bind_body);
7116
7117 gimple_omp_set_body (single_stmt, NULL);
7118
7119 gimple_seq_add_seq (&bind_body, dlist);
7120
7121 bind_body = maybe_catch_exception (bind_body);
7122
7123 t = gimple_build_omp_return
7124 (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
7125 OMP_CLAUSE_NOWAIT));
7126 gimple_seq_add_stmt (&bind_body, t);
7127 gimple_bind_set_body (bind, bind_body);
7128
7129 pop_gimplify_context (bind);
7130
7131 gimple_bind_append_vars (bind, ctx->block_vars);
7132 BLOCK_VARS (block) = ctx->block_vars;
7133 if (BLOCK_VARS (block))
7134 TREE_USED (block) = 1;
7135 }
7136
7137
7138 /* Expand code for an OpenMP master directive. */
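/* Illustrative sketch: for

     #pragma omp master
       body ();

   we produce

     if (omp_get_thread_num () == 0)
       body ();

   with no implied barrier (the OMP return emitted below is nowait).  */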
7139
7140 static void
7141 lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7142 {
7143 tree block, lab = NULL, x, bfn_decl;
7144 gimple stmt = gsi_stmt (*gsi_p), bind;
7145 location_t loc = gimple_location (stmt);
7146 gimple_seq tseq;
7147 struct gimplify_ctx gctx;
7148
7149 push_gimplify_context (&gctx);
7150
7151 block = make_node (BLOCK);
7152 bind = gimple_build_bind (NULL, NULL, block);
7153 gsi_replace (gsi_p, bind, true);
7154 gimple_bind_add_stmt (bind, stmt);
7155
7156 bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
7157 x = build_call_expr_loc (loc, bfn_decl, 0);
7158 x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
7159 x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
7160 tseq = NULL;
7161 gimplify_and_add (x, &tseq);
7162 gimple_bind_add_seq (bind, tseq);
7163
7164 lower_omp (gimple_omp_body_ptr (stmt), ctx);
7165 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
7166 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
7167 gimple_omp_set_body (stmt, NULL);
7168
7169 gimple_bind_add_stmt (bind, gimple_build_label (lab));
7170
7171 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
7172
7173 pop_gimplify_context (bind);
7174
7175 gimple_bind_append_vars (bind, ctx->block_vars);
7176 BLOCK_VARS (block) = ctx->block_vars;
7177 }
7178
7179
7180 /* Expand code for an OpenMP ordered directive. */
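/* Illustrative sketch: the body of

     #pragma omp ordered
       body ();

   is simply bracketed by GOMP_ordered_start () / GOMP_ordered_end (),
   again with a nowait return marker.  */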
7181
7182 static void
7183 lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7184 {
7185 tree block;
7186 gimple stmt = gsi_stmt (*gsi_p), bind, x;
7187 struct gimplify_ctx gctx;
7188
7189 push_gimplify_context (&gctx);
7190
7191 block = make_node (BLOCK);
7192 bind = gimple_build_bind (NULL, NULL, block);
7193 gsi_replace (gsi_p, bind, true);
7194 gimple_bind_add_stmt (bind, stmt);
7195
7196 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
7197 0);
7198 gimple_bind_add_stmt (bind, x);
7199
7200 lower_omp (gimple_omp_body_ptr (stmt), ctx);
7201 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
7202 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
7203 gimple_omp_set_body (stmt, NULL);
7204
7205 x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
7206 gimple_bind_add_stmt (bind, x);
7207
7208 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
7209
7210 pop_gimplify_context (bind);
7211
7212 gimple_bind_append_vars (bind, ctx->block_vars);
7213 BLOCK_VARS (block) = gimple_bind_vars (bind);
7214 }
7215
7216
7217 /* Gimplify a GIMPLE_OMP_CRITICAL statement. This is a relatively simple
7218 substitution of a couple of function calls. But the NAMED case
7219 requires that languages coordinate a symbol name. It is therefore
7220 best put here in common code. */
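/* Roughly, an unnamed critical region lowers to

	GOMP_critical_start ();
	BODY;
	GOMP_critical_end ();

   while '#pragma omp critical (foo)' uses a shared mutex variable named
   after the user's identifier:

	GOMP_critical_name_start (&.gomp_critical_user_foo);
	BODY;
	GOMP_critical_name_end (&.gomp_critical_user_foo);

   This is a sketch of the calls built below, not the exact GIMPLE.  */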
7221
7222 static GTY((param1_is (tree), param2_is (tree)))
7223 splay_tree critical_name_mutexes;
7224
7225 static void
7226 lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7227 {
7228 tree block;
7229 tree name, lock, unlock;
7230 gimple stmt = gsi_stmt (*gsi_p), bind;
7231 location_t loc = gimple_location (stmt);
7232 gimple_seq tbody;
7233 struct gimplify_ctx gctx;
7234
7235 name = gimple_omp_critical_name (stmt);
7236 if (name)
7237 {
7238 tree decl;
7239 splay_tree_node n;
7240
7241 if (!critical_name_mutexes)
7242 critical_name_mutexes
7243 = splay_tree_new_ggc (splay_tree_compare_pointers,
7244 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
7245 ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
7246
7247 n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
7248 if (n == NULL)
7249 {
7250 char *new_str;
7251
7252 decl = create_tmp_var_raw (ptr_type_node, NULL);
7253
7254 new_str = ACONCAT ((".gomp_critical_user_",
7255 IDENTIFIER_POINTER (name), NULL));
7256 DECL_NAME (decl) = get_identifier (new_str);
7257 TREE_PUBLIC (decl) = 1;
7258 TREE_STATIC (decl) = 1;
7259 DECL_COMMON (decl) = 1;
7260 DECL_ARTIFICIAL (decl) = 1;
7261 DECL_IGNORED_P (decl) = 1;
7262 varpool_finalize_decl (decl);
7263
7264 splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
7265 (splay_tree_value) decl);
7266 }
7267 else
7268 decl = (tree) n->value;
7269
7270 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
7271 lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
7272
7273 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
7274 unlock = build_call_expr_loc (loc, unlock, 1,
7275 build_fold_addr_expr_loc (loc, decl));
7276 }
7277 else
7278 {
7279 lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
7280 lock = build_call_expr_loc (loc, lock, 0);
7281
7282 unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
7283 unlock = build_call_expr_loc (loc, unlock, 0);
7284 }
7285
7286 push_gimplify_context (&gctx);
7287
7288 block = make_node (BLOCK);
7289 bind = gimple_build_bind (NULL, NULL, block);
7290 gsi_replace (gsi_p, bind, true);
7291 gimple_bind_add_stmt (bind, stmt);
7292
7293 tbody = gimple_bind_body (bind);
7294 gimplify_and_add (lock, &tbody);
7295 gimple_bind_set_body (bind, tbody);
7296
7297 lower_omp (gimple_omp_body_ptr (stmt), ctx);
7298 gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
7299 gimple_bind_add_seq (bind, gimple_omp_body (stmt));
7300 gimple_omp_set_body (stmt, NULL);
7301
7302 tbody = gimple_bind_body (bind);
7303 gimplify_and_add (unlock, &tbody);
7304 gimple_bind_set_body (bind, tbody);
7305
7306 gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
7307
7308 pop_gimplify_context (bind);
7309 gimple_bind_append_vars (bind, ctx->block_vars);
7310 BLOCK_VARS (block) = gimple_bind_vars (bind);
7311 }
7312
7313
7314 /* A subroutine of lower_omp_for. Generate code to emit the predicate
7315 for a lastprivate clause. Given a loop control predicate of (V
7316 cond N2), we gate the clause on (!(V cond N2)). The lowered form
7317 is appended to *DLIST, iterator initialization is appended to
7318 *BODY_P. */
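/* For example, with '(V = N1; V < N2; V += STEP)' the lastprivate
   assignments added to *DLIST are guarded by 'if (V >= N2)'; when STEP
   is a literal 1 or -1 the guard can be strengthened to 'if (V == N2)',
   and V is also pre-initialized in *BODY_P so that threads which run no
   iterations do not execute the guarded code by accident.  (Illustrative
   sketch only.)  */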
7319
7320 static void
7321 lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
7322 gimple_seq *dlist, struct omp_context *ctx)
7323 {
7324 tree clauses, cond, vinit;
7325 enum tree_code cond_code;
7326 gimple_seq stmts;
7327
7328 cond_code = fd->loop.cond_code;
7329 cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
7330
7331 /* When possible, use a strict equality expression. This can let VRP
7332 type optimizations deduce the value and remove a copy. */
7333 if (host_integerp (fd->loop.step, 0))
7334 {
7335 HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
7336 if (step == 1 || step == -1)
7337 cond_code = EQ_EXPR;
7338 }
7339
7340 cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
7341
7342 clauses = gimple_omp_for_clauses (fd->for_stmt);
7343 stmts = NULL;
7344 lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
7345 if (!gimple_seq_empty_p (stmts))
7346 {
7347 gimple_seq_add_seq (&stmts, *dlist);
7348 *dlist = stmts;
7349
7350 /* Optimize: v = 0; is usually cheaper than v = some_other_constant. */
7351 vinit = fd->loop.n1;
7352 if (cond_code == EQ_EXPR
7353 && host_integerp (fd->loop.n2, 0)
7354 && ! integer_zerop (fd->loop.n2))
7355 vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
7356 else
7357 vinit = unshare_expr (vinit);
7358
7359 /* Initialize the iterator variable, so that threads that don't execute
7360 any iterations don't execute the lastprivate clauses by accident. */
7361 gimplify_assign (fd->loop.v, vinit, body_p);
7362 }
7363 }
7364
7365
7366 /* Lower code for an OpenMP loop directive. */
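/* The result, roughly, is a GIMPLE_BIND of the shape

	<input clause setup from lower_rec_input_clauses>
	<pre-body and temporaries for the lowered VAL1/VAL2/VAL3>
	<lastprivate pre-initialization of V, if needed>
	GIMPLE_OMP_FOR
	  <loop body>
	  GIMPLE_OMP_CONTINUE (V, V)
	<reduction merges and the lastprivate/destructor list>
	GIMPLE_OMP_RETURN (nowait?)

   This is only a sketch of the sequence assembled below, not the exact
   GIMPLE.  */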
7367
7368 static void
7369 lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7370 {
7371 tree *rhs_p, block;
7372 struct omp_for_data fd;
7373 gimple stmt = gsi_stmt (*gsi_p), new_stmt;
7374 gimple_seq omp_for_body, body, dlist;
7375 size_t i;
7376 struct gimplify_ctx gctx;
7377
7378 push_gimplify_context (&gctx);
7379
7380 lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
7381
7382 block = make_node (BLOCK);
7383 new_stmt = gimple_build_bind (NULL, NULL, block);
7384 /* Replace at gsi right away, so that 'stmt' is no longer a member
7385 of a sequence, as we're going to add it to a different
7386 one below. */
7387 gsi_replace (gsi_p, new_stmt, true);
7388
7389 /* Move declarations of temporaries in the loop body before we make
7390 it go away. */
7391 omp_for_body = gimple_omp_body (stmt);
7392 if (!gimple_seq_empty_p (omp_for_body)
7393 && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
7394 {
7395 tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
7396 gimple_bind_append_vars (new_stmt, vars);
7397 }
7398
7399 /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR. */
7400 dlist = NULL;
7401 body = NULL;
7402 lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
7403 gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
7404
7405 lower_omp (gimple_omp_body_ptr (stmt), ctx);
7406
7407 /* Lower the header expressions. At this point, we can assume that
7408 the header is of the form:
7409
7410 #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
7411
7412 We just need to make sure that VAL1, VAL2 and VAL3 are lowered
7413 using the .omp_data_s mapping, if needed. */
7414 for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
7415 {
7416 rhs_p = gimple_omp_for_initial_ptr (stmt, i);
7417 if (!is_gimple_min_invariant (*rhs_p))
7418 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
7419
7420 rhs_p = gimple_omp_for_final_ptr (stmt, i);
7421 if (!is_gimple_min_invariant (*rhs_p))
7422 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
7423
7424 rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
7425 if (!is_gimple_min_invariant (*rhs_p))
7426 *rhs_p = get_formal_tmp_var (*rhs_p, &body);
7427 }
7428
7429 /* Once lowered, extract the bounds and clauses. */
7430 extract_omp_for_data (stmt, &fd, NULL);
7431
7432 lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
7433
7434 gimple_seq_add_stmt (&body, stmt);
7435 gimple_seq_add_seq (&body, gimple_omp_body (stmt));
7436
7437 gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
7438 fd.loop.v));
7439
7440 /* After the loop, add exit clauses. */
7441 lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
7442 gimple_seq_add_seq (&body, dlist);
7443
7444 body = maybe_catch_exception (body);
7445
7446 /* Region exit marker goes at the end of the loop body. */
7447 gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
7448
7449 pop_gimplify_context (new_stmt);
7450
7451 gimple_bind_append_vars (new_stmt, ctx->block_vars);
7452 BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
7453 if (BLOCK_VARS (block))
7454 TREE_USED (block) = 1;
7455
7456 gimple_bind_set_body (new_stmt, body);
7457 gimple_omp_set_body (stmt, NULL);
7458 gimple_omp_for_set_pre_body (stmt, NULL);
7459 }
7460
7461 /* Callback for walk_stmts. Check if the current statement only contains
7462 GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS. */
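/* For instance, a parallel whose body consists of nothing but

	#pragma omp for
	for (i = 0; i < n; i++)
	  ...

   leaves *INFO at 1, and lower_omp_taskreg then marks the parallel as
   combined; a second workshare, or any other statement in the body,
   forces *INFO to -1.  (Illustrative example only.)  */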
7463
7464 static tree
7465 check_combined_parallel (gimple_stmt_iterator *gsi_p,
7466 bool *handled_ops_p,
7467 struct walk_stmt_info *wi)
7468 {
7469 int *info = (int *) wi->info;
7470 gimple stmt = gsi_stmt (*gsi_p);
7471
7472 *handled_ops_p = true;
7473 switch (gimple_code (stmt))
7474 {
7475 WALK_SUBSTMTS;
7476
7477 case GIMPLE_OMP_FOR:
7478 case GIMPLE_OMP_SECTIONS:
7479 *info = *info == 0 ? 1 : -1;
7480 break;
7481 default:
7482 *info = -1;
7483 break;
7484 }
7485 return NULL;
7486 }
7487
7488 struct omp_taskcopy_context
7489 {
7490 /* This field must be at the beginning, as we do "inheritance": Some
7491 callback functions for tree-inline.c (e.g., omp_copy_decl)
7492 receive a copy_body_data pointer that is up-casted to an
7493 omp_context pointer. */
7494 copy_body_data cb;
7495 omp_context *ctx;
7496 };
7497
7498 static tree
7499 task_copyfn_copy_decl (tree var, copy_body_data *cb)
7500 {
7501 struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
7502
7503 if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
7504 return create_tmp_var (TREE_TYPE (var), NULL);
7505
7506 return var;
7507 }
7508
7509 static tree
7510 task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
7511 {
7512 tree name, new_fields = NULL, type, f;
7513
7514 type = lang_hooks.types.make_type (RECORD_TYPE);
7515 name = DECL_NAME (TYPE_NAME (orig_type));
7516 name = build_decl (gimple_location (tcctx->ctx->stmt),
7517 TYPE_DECL, name, type);
7518 TYPE_NAME (type) = name;
7519
7520 for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
7521 {
7522 tree new_f = copy_node (f);
7523 DECL_CONTEXT (new_f) = type;
7524 TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
7525 TREE_CHAIN (new_f) = new_fields;
7526 walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
7527 walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
7528 walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
7529 &tcctx->cb, NULL);
7530 new_fields = new_f;
7531 *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
7532 }
7533 TYPE_FIELDS (type) = nreverse (new_fields);
7534 layout_type (type);
7535 return type;
7536 }
7537
7538 /* Create task copyfn. */
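/* The generated function copies the data captured by the encountering
   thread (*SARG, of srecord_type) into the block GOMP_task allocated for
   the task itself (*ARG, of record_type).  Very roughly, with
   illustrative field names:

	arg->shared_ptr = sarg->shared_ptr;	   shared vars: copy pointer
	arg->x = copy_ctor (sarg->x);		   non-VLA firstprivate
	arg->vla = <copy of sarg's VLA data>;
	arg->vla_ptr = &arg->vla;		   firstprivate VLAs

   This is a sketch of the three passes below, not the exact statements.  */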
7539
7540 static void
7541 create_task_copyfn (gimple task_stmt, omp_context *ctx)
7542 {
7543 struct function *child_cfun;
7544 tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
7545 tree record_type, srecord_type, bind, list;
7546 bool record_needs_remap = false, srecord_needs_remap = false;
7547 splay_tree_node n;
7548 struct omp_taskcopy_context tcctx;
7549 struct gimplify_ctx gctx;
7550 location_t loc = gimple_location (task_stmt);
7551
7552 child_fn = gimple_omp_task_copy_fn (task_stmt);
7553 child_cfun = DECL_STRUCT_FUNCTION (child_fn);
7554 gcc_assert (child_cfun->cfg == NULL);
7555 DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
7556
7557 /* Reset DECL_CONTEXT on function arguments. */
7558 for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
7559 DECL_CONTEXT (t) = child_fn;
7560
7561 /* Populate the function. */
7562 push_gimplify_context (&gctx);
7563 push_cfun (child_cfun);
7564
7565 bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
7566 TREE_SIDE_EFFECTS (bind) = 1;
7567 list = NULL;
7568 DECL_SAVED_TREE (child_fn) = bind;
7569 DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
7570
7571 /* Remap src and dst argument types if needed. */
7572 record_type = ctx->record_type;
7573 srecord_type = ctx->srecord_type;
7574 for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
7575 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
7576 {
7577 record_needs_remap = true;
7578 break;
7579 }
7580 for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
7581 if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
7582 {
7583 srecord_needs_remap = true;
7584 break;
7585 }
7586
7587 if (record_needs_remap || srecord_needs_remap)
7588 {
7589 memset (&tcctx, '\0', sizeof (tcctx));
7590 tcctx.cb.src_fn = ctx->cb.src_fn;
7591 tcctx.cb.dst_fn = child_fn;
7592 tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
7593 gcc_checking_assert (tcctx.cb.src_node);
7594 tcctx.cb.dst_node = tcctx.cb.src_node;
7595 tcctx.cb.src_cfun = ctx->cb.src_cfun;
7596 tcctx.cb.copy_decl = task_copyfn_copy_decl;
7597 tcctx.cb.eh_lp_nr = 0;
7598 tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
7599 tcctx.cb.decl_map = pointer_map_create ();
7600 tcctx.ctx = ctx;
7601
7602 if (record_needs_remap)
7603 record_type = task_copyfn_remap_type (&tcctx, record_type);
7604 if (srecord_needs_remap)
7605 srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
7606 }
7607 else
7608 tcctx.cb.decl_map = NULL;
7609
7610 arg = DECL_ARGUMENTS (child_fn);
7611 TREE_TYPE (arg) = build_pointer_type (record_type);
7612 sarg = DECL_CHAIN (arg);
7613 TREE_TYPE (sarg) = build_pointer_type (srecord_type);
7614
7615 /* First pass: initialize temporaries used in record_type and srecord_type
7616 sizes and field offsets. */
7617 if (tcctx.cb.decl_map)
7618 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
7619 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
7620 {
7621 tree *p;
7622
7623 decl = OMP_CLAUSE_DECL (c);
7624 p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
7625 if (p == NULL)
7626 continue;
7627 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
7628 sf = (tree) n->value;
7629 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
7630 src = build_simple_mem_ref_loc (loc, sarg);
7631 src = omp_build_component_ref (src, sf);
7632 t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
7633 append_to_statement_list (t, &list);
7634 }
7635
7636 /* Second pass: copy shared var pointers and copy construct non-VLA
7637 firstprivate vars. */
7638 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
7639 switch (OMP_CLAUSE_CODE (c))
7640 {
7641 case OMP_CLAUSE_SHARED:
7642 decl = OMP_CLAUSE_DECL (c);
7643 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
7644 if (n == NULL)
7645 break;
7646 f = (tree) n->value;
7647 if (tcctx.cb.decl_map)
7648 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
7649 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
7650 sf = (tree) n->value;
7651 if (tcctx.cb.decl_map)
7652 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
7653 src = build_simple_mem_ref_loc (loc, sarg);
7654 src = omp_build_component_ref (src, sf);
7655 dst = build_simple_mem_ref_loc (loc, arg);
7656 dst = omp_build_component_ref (dst, f);
7657 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
7658 append_to_statement_list (t, &list);
7659 break;
7660 case OMP_CLAUSE_FIRSTPRIVATE:
7661 decl = OMP_CLAUSE_DECL (c);
7662 if (is_variable_sized (decl))
7663 break;
7664 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
7665 if (n == NULL)
7666 break;
7667 f = (tree) n->value;
7668 if (tcctx.cb.decl_map)
7669 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
7670 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
7671 if (n != NULL)
7672 {
7673 sf = (tree) n->value;
7674 if (tcctx.cb.decl_map)
7675 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
7676 src = build_simple_mem_ref_loc (loc, sarg);
7677 src = omp_build_component_ref (src, sf);
7678 if (use_pointer_for_field (decl, NULL) || is_reference (decl))
7679 src = build_simple_mem_ref_loc (loc, src);
7680 }
7681 else
7682 src = decl;
7683 dst = build_simple_mem_ref_loc (loc, arg);
7684 dst = omp_build_component_ref (dst, f);
7685 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
7686 append_to_statement_list (t, &list);
7687 break;
7688 case OMP_CLAUSE_PRIVATE:
7689 if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
7690 break;
7691 decl = OMP_CLAUSE_DECL (c);
7692 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
7693 f = (tree) n->value;
7694 if (tcctx.cb.decl_map)
7695 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
7696 n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
7697 if (n != NULL)
7698 {
7699 sf = (tree) n->value;
7700 if (tcctx.cb.decl_map)
7701 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
7702 src = build_simple_mem_ref_loc (loc, sarg);
7703 src = omp_build_component_ref (src, sf);
7704 if (use_pointer_for_field (decl, NULL))
7705 src = build_simple_mem_ref_loc (loc, src);
7706 }
7707 else
7708 src = decl;
7709 dst = build_simple_mem_ref_loc (loc, arg);
7710 dst = omp_build_component_ref (dst, f);
7711 t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
7712 append_to_statement_list (t, &list);
7713 break;
7714 default:
7715 break;
7716 }
7717
7718 /* Last pass: handle VLA firstprivates. */
7719 if (tcctx.cb.decl_map)
7720 for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
7721 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
7722 {
7723 tree ind, ptr, df;
7724
7725 decl = OMP_CLAUSE_DECL (c);
7726 if (!is_variable_sized (decl))
7727 continue;
7728 n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
7729 if (n == NULL)
7730 continue;
7731 f = (tree) n->value;
7732 f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
7733 gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
7734 ind = DECL_VALUE_EXPR (decl);
7735 gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
7736 gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
7737 n = splay_tree_lookup (ctx->sfield_map,
7738 (splay_tree_key) TREE_OPERAND (ind, 0));
7739 sf = (tree) n->value;
7740 sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
7741 src = build_simple_mem_ref_loc (loc, sarg);
7742 src = omp_build_component_ref (src, sf);
7743 src = build_simple_mem_ref_loc (loc, src);
7744 dst = build_simple_mem_ref_loc (loc, arg);
7745 dst = omp_build_component_ref (dst, f);
7746 t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
7747 append_to_statement_list (t, &list);
7748 n = splay_tree_lookup (ctx->field_map,
7749 (splay_tree_key) TREE_OPERAND (ind, 0));
7750 df = (tree) n->value;
7751 df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
7752 ptr = build_simple_mem_ref_loc (loc, arg);
7753 ptr = omp_build_component_ref (ptr, df);
7754 t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
7755 build_fold_addr_expr_loc (loc, dst));
7756 append_to_statement_list (t, &list);
7757 }
7758
7759 t = build1 (RETURN_EXPR, void_type_node, NULL);
7760 append_to_statement_list (t, &list);
7761
7762 if (tcctx.cb.decl_map)
7763 pointer_map_destroy (tcctx.cb.decl_map);
7764 pop_gimplify_context (NULL);
7765 BIND_EXPR_BODY (bind) = list;
7766 pop_cfun ();
7767 }
7768
7769 /* Lower the OpenMP parallel or task directive in the current statement
7770 in GSI_P. CTX holds context information for the directive. */
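/* The end result, roughly, is

	BIND
	  <ilist: stores into the .omp_data_o sender variable>
	  GIMPLE_OMP_PARALLEL / GIMPLE_OMP_TASK  (data_arg = &.omp_data_o)
	    receiver_decl = &.omp_data_o;
	    <lowered input clauses, the original body, output clauses>
	    GIMPLE_OMP_RETURN
	  <olist: copy-back and cleanup after the construct>

   a sketch of the sequence assembled below, not the exact GIMPLE.  */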
7771
7772 static void
7773 lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7774 {
7775 tree clauses;
7776 tree child_fn, t;
7777 gimple stmt = gsi_stmt (*gsi_p);
7778 gimple par_bind, bind;
7779 gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
7780 struct gimplify_ctx gctx;
7781 location_t loc = gimple_location (stmt);
7782
7783 clauses = gimple_omp_taskreg_clauses (stmt);
7784 par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
7785 par_body = gimple_bind_body (par_bind);
7786 child_fn = ctx->cb.dst_fn;
7787 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
7788 && !gimple_omp_parallel_combined_p (stmt))
7789 {
7790 struct walk_stmt_info wi;
7791 int ws_num = 0;
7792
7793 memset (&wi, 0, sizeof (wi));
7794 wi.info = &ws_num;
7795 wi.val_only = true;
7796 walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
7797 if (ws_num == 1)
7798 gimple_omp_parallel_set_combined_p (stmt, true);
7799 }
7800 if (ctx->srecord_type)
7801 create_task_copyfn (stmt, ctx);
7802
7803 push_gimplify_context (&gctx);
7804
7805 par_olist = NULL;
7806 par_ilist = NULL;
7807 lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
7808 lower_omp (&par_body, ctx);
7809 if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
7810 lower_reduction_clauses (clauses, &par_olist, ctx);
7811
7812 /* Declare all the variables created by mapping and the variables
7813 declared in the scope of the parallel body. */
7814 record_vars_into (ctx->block_vars, child_fn);
7815 record_vars_into (gimple_bind_vars (par_bind), child_fn);
7816
7817 if (ctx->record_type)
7818 {
7819 ctx->sender_decl
7820 = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
7821 : ctx->record_type, ".omp_data_o");
7822 DECL_NAMELESS (ctx->sender_decl) = 1;
7823 TREE_ADDRESSABLE (ctx->sender_decl) = 1;
7824 gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
7825 }
7826
7827 olist = NULL;
7828 ilist = NULL;
7829 lower_send_clauses (clauses, &ilist, &olist, ctx);
7830 lower_send_shared_vars (&ilist, &olist, ctx);
7831
7832 /* Once all the expansions are done, sequence all the different
7833 fragments inside gimple_omp_body. */
7834
7835 new_body = NULL;
7836
7837 if (ctx->record_type)
7838 {
7839 t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
7840 /* fixup_child_record_type might have changed receiver_decl's type. */
7841 t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
7842 gimple_seq_add_stmt (&new_body,
7843 gimple_build_assign (ctx->receiver_decl, t));
7844 }
7845
7846 gimple_seq_add_seq (&new_body, par_ilist);
7847 gimple_seq_add_seq (&new_body, par_body);
7848 gimple_seq_add_seq (&new_body, par_olist);
7849 new_body = maybe_catch_exception (new_body);
7850 gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
7851 gimple_omp_set_body (stmt, new_body);
7852
7853 bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
7854 gsi_replace (gsi_p, bind, true);
7855 gimple_bind_add_seq (bind, ilist);
7856 gimple_bind_add_stmt (bind, stmt);
7857 gimple_bind_add_seq (bind, olist);
7858
7859 pop_gimplify_context (NULL);
7860 }
7861
7862 /* Callback for lower_omp_1. Return non-NULL if *tp needs to be
7863 regimplified. If DATA is non-NULL, lower_omp_1 is outside
7864 of OpenMP context, but with task_shared_vars set. */
7865
7866 static tree
7867 lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
7868 void *data)
7869 {
7870 tree t = *tp;
7871
7872 /* Any variable with DECL_VALUE_EXPR needs to be regimplified. */
7873 if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
7874 return t;
7875
7876 if (task_shared_vars
7877 && DECL_P (t)
7878 && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
7879 return t;
7880
7881 /* If a global variable has been privatized, TREE_CONSTANT on
7882 ADDR_EXPR might be wrong. */
7883 if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
7884 recompute_tree_invariant_for_addr_expr (t);
7885
7886 *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
7887 return NULL_TREE;
7888 }
7889
7890 static void
7891 lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
7892 {
7893 gimple stmt = gsi_stmt (*gsi_p);
7894 struct walk_stmt_info wi;
7895
7896 if (gimple_has_location (stmt))
7897 input_location = gimple_location (stmt);
7898
7899 if (task_shared_vars)
7900 memset (&wi, '\0', sizeof (wi));
7901
7902 /* If we have issued syntax errors, avoid doing any heavy lifting.
7903 Just replace the OpenMP directives with a NOP to avoid
7904 confusing RTL expansion. */
7905 if (seen_error () && is_gimple_omp (stmt))
7906 {
7907 gsi_replace (gsi_p, gimple_build_nop (), true);
7908 return;
7909 }
7910
7911 switch (gimple_code (stmt))
7912 {
7913 case GIMPLE_COND:
7914 if ((ctx || task_shared_vars)
7915 && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
7916 ctx ? NULL : &wi, NULL)
7917 || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
7918 ctx ? NULL : &wi, NULL)))
7919 gimple_regimplify_operands (stmt, gsi_p);
7920 break;
7921 case GIMPLE_CATCH:
7922 lower_omp (gimple_catch_handler_ptr (stmt), ctx);
7923 break;
7924 case GIMPLE_EH_FILTER:
7925 lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
7926 break;
7927 case GIMPLE_TRY:
7928 lower_omp (gimple_try_eval_ptr (stmt), ctx);
7929 lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
7930 break;
7931 case GIMPLE_TRANSACTION:
7932 lower_omp (gimple_transaction_body_ptr (stmt), ctx);
7933 break;
7934 case GIMPLE_BIND:
7935 lower_omp (gimple_bind_body_ptr (stmt), ctx);
7936 break;
7937 case GIMPLE_OMP_PARALLEL:
7938 case GIMPLE_OMP_TASK:
7939 ctx = maybe_lookup_ctx (stmt);
7940 lower_omp_taskreg (gsi_p, ctx);
7941 break;
7942 case GIMPLE_OMP_FOR:
7943 ctx = maybe_lookup_ctx (stmt);
7944 gcc_assert (ctx);
7945 lower_omp_for (gsi_p, ctx);
7946 break;
7947 case GIMPLE_OMP_SECTIONS:
7948 ctx = maybe_lookup_ctx (stmt);
7949 gcc_assert (ctx);
7950 lower_omp_sections (gsi_p, ctx);
7951 break;
7952 case GIMPLE_OMP_SINGLE:
7953 ctx = maybe_lookup_ctx (stmt);
7954 gcc_assert (ctx);
7955 lower_omp_single (gsi_p, ctx);
7956 break;
7957 case GIMPLE_OMP_MASTER:
7958 ctx = maybe_lookup_ctx (stmt);
7959 gcc_assert (ctx);
7960 lower_omp_master (gsi_p, ctx);
7961 break;
7962 case GIMPLE_OMP_ORDERED:
7963 ctx = maybe_lookup_ctx (stmt);
7964 gcc_assert (ctx);
7965 lower_omp_ordered (gsi_p, ctx);
7966 break;
7967 case GIMPLE_OMP_CRITICAL:
7968 ctx = maybe_lookup_ctx (stmt);
7969 gcc_assert (ctx);
7970 lower_omp_critical (gsi_p, ctx);
7971 break;
7972 case GIMPLE_OMP_ATOMIC_LOAD:
7973 if ((ctx || task_shared_vars)
7974 && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
7975 lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
7976 gimple_regimplify_operands (stmt, gsi_p);
7977 break;
7978 default:
7979 if ((ctx || task_shared_vars)
7980 && walk_gimple_op (stmt, lower_omp_regimplify_p,
7981 ctx ? NULL : &wi))
7982 gimple_regimplify_operands (stmt, gsi_p);
7983 break;
7984 }
7985 }
7986
7987 static void
7988 lower_omp (gimple_seq *body, omp_context *ctx)
7989 {
7990 location_t saved_location = input_location;
7991 gimple_stmt_iterator gsi;
7992 for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
7993 lower_omp_1 (&gsi, ctx);
7994 input_location = saved_location;
7995 }
7996 \f
7997 /* Main entry point. */
7998
7999 static unsigned int
8000 execute_lower_omp (void)
8001 {
8002 gimple_seq body;
8003
8004 /* This pass always runs, to provide PROP_gimple_lomp.
8005 But there is nothing to do unless -fopenmp is given. */
8006 if (flag_openmp == 0)
8007 return 0;
8008
8009 all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
8010 delete_omp_context);
8011
8012 body = gimple_body (current_function_decl);
8013 scan_omp (&body, NULL);
8014 gcc_assert (taskreg_nesting_level == 0);
8015
8016 if (all_contexts->root)
8017 {
8018 struct gimplify_ctx gctx;
8019
8020 if (task_shared_vars)
8021 push_gimplify_context (&gctx);
8022 lower_omp (&body, NULL);
8023 if (task_shared_vars)
8024 pop_gimplify_context (NULL);
8025 }
8026
8027 if (all_contexts)
8028 {
8029 splay_tree_delete (all_contexts);
8030 all_contexts = NULL;
8031 }
8032 BITMAP_FREE (task_shared_vars);
8033 return 0;
8034 }
8035
8036 namespace {
8037
8038 const pass_data pass_data_lower_omp =
8039 {
8040 GIMPLE_PASS, /* type */
8041 "omplower", /* name */
8042 OPTGROUP_NONE, /* optinfo_flags */
8043 false, /* has_gate */
8044 true, /* has_execute */
8045 TV_NONE, /* tv_id */
8046 PROP_gimple_any, /* properties_required */
8047 PROP_gimple_lomp, /* properties_provided */
8048 0, /* properties_destroyed */
8049 0, /* todo_flags_start */
8050 0, /* todo_flags_finish */
8051 };
8052
8053 class pass_lower_omp : public gimple_opt_pass
8054 {
8055 public:
8056 pass_lower_omp(gcc::context *ctxt)
8057 : gimple_opt_pass(pass_data_lower_omp, ctxt)
8058 {}
8059
8060 /* opt_pass methods: */
8061 unsigned int execute () { return execute_lower_omp (); }
8062
8063 }; // class pass_lower_omp
8064
8065 } // anon namespace
8066
8067 gimple_opt_pass *
8068 make_pass_lower_omp (gcc::context *ctxt)
8069 {
8070 return new pass_lower_omp (ctxt);
8071 }
8072 \f
8073 /* The following is a utility to diagnose OpenMP structured block violations.
8074 It is not part of the "omplower" pass, as that's invoked too late. It
8075 should be invoked by the respective front ends after gimplification. */
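/* For example, given

	void f (int p)
	{
	#pragma omp parallel
	  {
	    if (p)
	      goto out;
	  }
	out:;
	}

   the goto leaves the structured block of the parallel, so the checker
   below issues "invalid branch to/from an OpenMP structured block" and
   replaces the branch with a nop.  (Illustrative example only.)  */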
8076
8077 static splay_tree all_labels;
8078
8079 /* Check for mismatched contexts and generate an error if needed. Return
8080 true if an error is detected. */
8081
8082 static bool
8083 diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
8084 gimple branch_ctx, gimple label_ctx)
8085 {
8086 if (label_ctx == branch_ctx)
8087 return false;
8088
8089
8090 /*
8091 Previously we kept track of the label's entire context in diagnose_sb_[12]
8092 so we could traverse it and issue a correct "exit" or "enter" error
8093 message upon a structured block violation.
8094
8095 We built the context by building a list with tree_cons'ing, but there is
8096 no easy counterpart in gimple tuples. It seems like far too much work
8097 for issuing exit/enter error messages. If someone really misses the
8098 distinct error message... patches welcome.
8099 */
8100
8101 #if 0
8102 /* Try to avoid confusing the user by producing an error message
8103 with correct "exit" or "enter" verbiage. We prefer "exit"
8104 unless we can show that LABEL_CTX is nested within BRANCH_CTX. */
8105 if (branch_ctx == NULL)
8106 exit_p = false;
8107 else
8108 {
8109 while (label_ctx)
8110 {
8111 if (TREE_VALUE (label_ctx) == branch_ctx)
8112 {
8113 exit_p = false;
8114 break;
8115 }
8116 label_ctx = TREE_CHAIN (label_ctx);
8117 }
8118 }
8119
8120 if (exit_p)
8121 error ("invalid exit from OpenMP structured block");
8122 else
8123 error ("invalid entry to OpenMP structured block");
8124 #endif
8125
8126 /* If it's obvious we have an invalid entry, be specific about the error. */
8127 if (branch_ctx == NULL)
8128 error ("invalid entry to OpenMP structured block");
8129 else
8130 /* Otherwise, be vague and lazy, but efficient. */
8131 error ("invalid branch to/from an OpenMP structured block");
8132
8133 gsi_replace (gsi_p, gimple_build_nop (), false);
8134 return true;
8135 }
8136
8137 /* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
8138 where each label is found. */
8139
8140 static tree
8141 diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
8142 struct walk_stmt_info *wi)
8143 {
8144 gimple context = (gimple) wi->info;
8145 gimple inner_context;
8146 gimple stmt = gsi_stmt (*gsi_p);
8147
8148 *handled_ops_p = true;
8149
8150 switch (gimple_code (stmt))
8151 {
8152 WALK_SUBSTMTS;
8153
8154 case GIMPLE_OMP_PARALLEL:
8155 case GIMPLE_OMP_TASK:
8156 case GIMPLE_OMP_SECTIONS:
8157 case GIMPLE_OMP_SINGLE:
8158 case GIMPLE_OMP_SECTION:
8159 case GIMPLE_OMP_MASTER:
8160 case GIMPLE_OMP_ORDERED:
8161 case GIMPLE_OMP_CRITICAL:
8162 /* The minimal context here is just the current OMP construct. */
8163 inner_context = stmt;
8164 wi->info = inner_context;
8165 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
8166 wi->info = context;
8167 break;
8168
8169 case GIMPLE_OMP_FOR:
8170 inner_context = stmt;
8171 wi->info = inner_context;
8172 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
8173 walk them. */
8174 walk_gimple_seq (gimple_omp_for_pre_body (stmt),
8175 diagnose_sb_1, NULL, wi);
8176 walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
8177 wi->info = context;
8178 break;
8179
8180 case GIMPLE_LABEL:
8181 splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
8182 (splay_tree_value) context);
8183 break;
8184
8185 default:
8186 break;
8187 }
8188
8189 return NULL_TREE;
8190 }
8191
8192 /* Pass 2: Check each branch and see if its context differs from that of
8193 the destination label's context. */
8194
8195 static tree
8196 diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
8197 struct walk_stmt_info *wi)
8198 {
8199 gimple context = (gimple) wi->info;
8200 splay_tree_node n;
8201 gimple stmt = gsi_stmt (*gsi_p);
8202
8203 *handled_ops_p = true;
8204
8205 switch (gimple_code (stmt))
8206 {
8207 WALK_SUBSTMTS;
8208
8209 case GIMPLE_OMP_PARALLEL:
8210 case GIMPLE_OMP_TASK:
8211 case GIMPLE_OMP_SECTIONS:
8212 case GIMPLE_OMP_SINGLE:
8213 case GIMPLE_OMP_SECTION:
8214 case GIMPLE_OMP_MASTER:
8215 case GIMPLE_OMP_ORDERED:
8216 case GIMPLE_OMP_CRITICAL:
8217 wi->info = stmt;
8218 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
8219 wi->info = context;
8220 break;
8221
8222 case GIMPLE_OMP_FOR:
8223 wi->info = stmt;
8224 /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
8225 walk them. */
8226 walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
8227 diagnose_sb_2, NULL, wi);
8228 walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
8229 wi->info = context;
8230 break;
8231
8232 case GIMPLE_COND:
8233 {
8234 tree lab = gimple_cond_true_label (stmt);
8235 if (lab)
8236 {
8237 n = splay_tree_lookup (all_labels,
8238 (splay_tree_key) lab);
8239 diagnose_sb_0 (gsi_p, context,
8240 n ? (gimple) n->value : NULL);
8241 }
8242 lab = gimple_cond_false_label (stmt);
8243 if (lab)
8244 {
8245 n = splay_tree_lookup (all_labels,
8246 (splay_tree_key) lab);
8247 diagnose_sb_0 (gsi_p, context,
8248 n ? (gimple) n->value : NULL);
8249 }
8250 }
8251 break;
8252
8253 case GIMPLE_GOTO:
8254 {
8255 tree lab = gimple_goto_dest (stmt);
8256 if (TREE_CODE (lab) != LABEL_DECL)
8257 break;
8258
8259 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
8260 diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
8261 }
8262 break;
8263
8264 case GIMPLE_SWITCH:
8265 {
8266 unsigned int i;
8267 for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
8268 {
8269 tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
8270 n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
8271 if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
8272 break;
8273 }
8274 }
8275 break;
8276
8277 case GIMPLE_RETURN:
8278 diagnose_sb_0 (gsi_p, context, NULL);
8279 break;
8280
8281 default:
8282 break;
8283 }
8284
8285 return NULL_TREE;
8286 }
8287
8288 static unsigned int
8289 diagnose_omp_structured_block_errors (void)
8290 {
8291 struct walk_stmt_info wi;
8292 gimple_seq body = gimple_body (current_function_decl);
8293
8294 all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
8295
8296 memset (&wi, 0, sizeof (wi));
8297 walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
8298
8299 memset (&wi, 0, sizeof (wi));
8300 wi.want_locations = true;
8301 walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);
8302
8303 gimple_set_body (current_function_decl, body);
8304
8305 splay_tree_delete (all_labels);
8306 all_labels = NULL;
8307
8308 return 0;
8309 }
8310
8311 static bool
8312 gate_diagnose_omp_blocks (void)
8313 {
8314 return flag_openmp != 0;
8315 }
8316
8317 namespace {
8318
8319 const pass_data pass_data_diagnose_omp_blocks =
8320 {
8321 GIMPLE_PASS, /* type */
8322 "*diagnose_omp_blocks", /* name */
8323 OPTGROUP_NONE, /* optinfo_flags */
8324 true, /* has_gate */
8325 true, /* has_execute */
8326 TV_NONE, /* tv_id */
8327 PROP_gimple_any, /* properties_required */
8328 0, /* properties_provided */
8329 0, /* properties_destroyed */
8330 0, /* todo_flags_start */
8331 0, /* todo_flags_finish */
8332 };
8333
8334 class pass_diagnose_omp_blocks : public gimple_opt_pass
8335 {
8336 public:
8337 pass_diagnose_omp_blocks(gcc::context *ctxt)
8338 : gimple_opt_pass(pass_data_diagnose_omp_blocks, ctxt)
8339 {}
8340
8341 /* opt_pass methods: */
8342 bool gate () { return gate_diagnose_omp_blocks (); }
8343 unsigned int execute () {
8344 return diagnose_omp_structured_block_errors ();
8345 }
8346
8347 }; // class pass_diagnose_omp_blocks
8348
8349 } // anon namespace
8350
8351 gimple_opt_pass *
8352 make_pass_diagnose_omp_blocks (gcc::context *ctxt)
8353 {
8354 return new pass_diagnose_omp_blocks (ctxt);
8355 }
8356
8357 #include "gt-omp-low.h"