gcc/c-family/c-omp.c
1 /* This file contains routines to construct OpenACC and OpenMP constructs,
2 called from parsing in the C and C++ front ends.
3
4 Copyright (C) 2005-2020 Free Software Foundation, Inc.
5 Contributed by Richard Henderson <rth@redhat.com>,
6 Diego Novillo <dnovillo@redhat.com>.
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "options.h"
28 #include "c-common.h"
29 #include "gimple-expr.h"
30 #include "c-pragma.h"
31 #include "stringpool.h"
32 #include "omp-general.h"
33 #include "gomp-constants.h"
34 #include "memmodel.h"
35 #include "attribs.h"
36 #include "gimplify.h"
37 #include "langhooks.h"
38
39
40 /* Complete a #pragma oacc wait construct. LOC is the location of
41 the #pragma. */
42
43 tree
44 c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
45 {
46 const int nparms = list_length (parms);
47 tree stmt, t;
48 vec<tree, va_gc> *args;
49
50 vec_alloc (args, nparms + 2);
51 stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);
52
53 if (omp_find_clause (clauses, OMP_CLAUSE_ASYNC))
54 t = OMP_CLAUSE_ASYNC_EXPR (clauses);
55 else
56 t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);
57
58 args->quick_push (t);
59 args->quick_push (build_int_cst (integer_type_node, nparms));
60
61 for (t = parms; t; t = TREE_CHAIN (t))
62 {
63 if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
64 args->quick_push (build_int_cst (integer_type_node,
65 TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
66 else
67 args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
68 }
69
70 stmt = build_call_expr_loc_vec (loc, stmt, args);
71
72 vec_free (args);
73
74 return stmt;
75 }
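/* Illustration (not part of the GCC sources) of the source form this
   routine finalizes, assuming standard OpenACC 2.x syntax:

       #pragma acc wait (1, 2) async (3)

   is lowered into a single call to the libgomp entry point behind
   BUILT_IN_GOACC_WAIT, roughly

       GOACC_wait (3, 2, 1, 2);

   i.e. the async queue (or GOMP_ASYNC_SYNC when no async clause is
   present), the number of wait arguments, and then the queues to wait
   for, matching the order the arguments are pushed above.  */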
76
77 /* Complete a #pragma omp master construct. STMT is the structured-block
78 that follows the pragma. LOC is the location of the #pragma. */
79
80 tree
81 c_finish_omp_master (location_t loc, tree stmt)
82 {
83 tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
84 SET_EXPR_LOCATION (t, loc);
85 return t;
86 }
87
88 /* Complete a #pragma omp taskgroup construct. BODY is the structured-block
89 that follows the pragma and CLAUSES any clauses on the construct. LOC is the location of the #pragma. */
90
91 tree
92 c_finish_omp_taskgroup (location_t loc, tree body, tree clauses)
93 {
94 tree stmt = make_node (OMP_TASKGROUP);
95 TREE_TYPE (stmt) = void_type_node;
96 OMP_TASKGROUP_BODY (stmt) = body;
97 OMP_TASKGROUP_CLAUSES (stmt) = clauses;
98 SET_EXPR_LOCATION (stmt, loc);
99 return add_stmt (stmt);
100 }
101
102 /* Complete a #pragma omp critical construct. BODY is the structured-block
103 that follows the pragma, NAME is the identifier in the pragma, or null
104 if it was omitted. LOC is the location of the #pragma. */
105
106 tree
107 c_finish_omp_critical (location_t loc, tree body, tree name, tree clauses)
108 {
109 gcc_assert (!clauses || OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_HINT);
110 if (name == NULL_TREE
111 && clauses != NULL_TREE
112 && integer_nonzerop (OMP_CLAUSE_HINT_EXPR (clauses)))
113 {
114 error_at (OMP_CLAUSE_LOCATION (clauses),
115 "%<#pragma omp critical%> with %<hint%> clause requires "
116 "a name, except when %<omp_sync_hint_none%> is used");
117 return error_mark_node;
118 }
119
120 tree stmt = make_node (OMP_CRITICAL);
121 TREE_TYPE (stmt) = void_type_node;
122 OMP_CRITICAL_BODY (stmt) = body;
123 OMP_CRITICAL_NAME (stmt) = name;
124 OMP_CRITICAL_CLAUSES (stmt) = clauses;
125 SET_EXPR_LOCATION (stmt, loc);
126 return add_stmt (stmt);
127 }
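/* Illustrative example (not from the sources) of the restriction
   enforced above, assuming OpenMP 5.0 hint syntax: an unnamed critical
   may only carry a hint clause whose value is omp_sync_hint_none (0):

       #pragma omp critical (lock1) hint(omp_sync_hint_contended)
       x++;                               // OK: named critical

       #pragma omp critical hint(omp_sync_hint_contended)
       x++;                               // rejected by the check above

   Accepted forms are wrapped into an OMP_CRITICAL node carrying the
   name and the hint clause.  */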
128
129 /* Complete a #pragma omp ordered construct. STMT is the structured-block
130 that follows the pragma and CLAUSES its clauses. LOC is the location of the #pragma. */
131
132 tree
133 c_finish_omp_ordered (location_t loc, tree clauses, tree stmt)
134 {
135 tree t = make_node (OMP_ORDERED);
136 TREE_TYPE (t) = void_type_node;
137 OMP_ORDERED_BODY (t) = stmt;
138 if (!flag_openmp /* flag_openmp_simd */
139 && (OMP_CLAUSE_CODE (clauses) != OMP_CLAUSE_SIMD
140 || OMP_CLAUSE_CHAIN (clauses)))
141 clauses = build_omp_clause (loc, OMP_CLAUSE_SIMD);
142 OMP_ORDERED_CLAUSES (t) = clauses;
143 SET_EXPR_LOCATION (t, loc);
144 return add_stmt (t);
145 }
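/* Sketch (not from the sources) of what the clause replacement above is
   for: when only -fopenmp-simd is enabled, an ordered construct such as

       #pragma omp ordered simd
       { ... }

   keeps its single simd clause, while any other clause combination is
   replaced by a fresh OMP_CLAUSE_SIMD so later passes treat the
   directive purely as an ordered-simd region.  */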
146
147
148 /* Complete a #pragma omp barrier construct. LOC is the location of
149 the #pragma. */
150
151 void
152 c_finish_omp_barrier (location_t loc)
153 {
154 tree x;
155
156 x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
157 x = build_call_expr_loc (loc, x, 0);
158 add_stmt (x);
159 }
160
161
162 /* Complete a #pragma omp taskwait construct. LOC is the location of the
163 pragma. */
164
165 void
166 c_finish_omp_taskwait (location_t loc)
167 {
168 tree x;
169
170 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
171 x = build_call_expr_loc (loc, x, 0);
172 add_stmt (x);
173 }
174
175
176 /* Complete a #pragma omp taskyield construct. LOC is the location of the
177 pragma. */
178
179 void
180 c_finish_omp_taskyield (location_t loc)
181 {
182 tree x;
183
184 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
185 x = build_call_expr_loc (loc, x, 0);
186 add_stmt (x);
187 }
188
189
190 /* Complete a #pragma omp atomic construct. For CODE OMP_ATOMIC
191 the expression to be implemented atomically is LHS opcode= RHS.
192 For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
193 opcode= RHS with the new or old content of LHS returned.
194 LOC is the location of the atomic statement. The value returned
195 is either error_mark_node (if the construct was erroneous) or an
196 OMP_ATOMIC* node which should be added to the current statement
197 tree with add_stmt. If TEST is set, avoid calling save_expr
198 or create_tmp_var*. */
199
200 tree
201 c_finish_omp_atomic (location_t loc, enum tree_code code,
202 enum tree_code opcode, tree lhs, tree rhs,
203 tree v, tree lhs1, tree rhs1, bool swapped,
204 enum omp_memory_order memory_order, bool test)
205 {
206 tree x, type, addr, pre = NULL_TREE;
207 HOST_WIDE_INT bitpos = 0, bitsize = 0;
208
209 if (lhs == error_mark_node || rhs == error_mark_node
210 || v == error_mark_node || lhs1 == error_mark_node
211 || rhs1 == error_mark_node)
212 return error_mark_node;
213
214 /* ??? According to one reading of the OpenMP spec, complex types are
215 supported, but there are no atomic stores for any architecture.
216 But at least icc 9.0 doesn't support complex types here either.
217 And let's not even talk about vector types... */
218 type = TREE_TYPE (lhs);
219 if (!INTEGRAL_TYPE_P (type)
220 && !POINTER_TYPE_P (type)
221 && !SCALAR_FLOAT_TYPE_P (type))
222 {
223 error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
224 return error_mark_node;
225 }
226 if (TYPE_ATOMIC (type))
227 {
228 error_at (loc, "%<_Atomic%> expression in %<#pragma omp atomic%>");
229 return error_mark_node;
230 }
231
232 if (opcode == RDIV_EXPR)
233 opcode = TRUNC_DIV_EXPR;
234
235 /* ??? Validate that rhs does not overlap lhs. */
236 tree blhs = NULL;
237 if (TREE_CODE (lhs) == COMPONENT_REF
238 && TREE_CODE (TREE_OPERAND (lhs, 1)) == FIELD_DECL
239 && DECL_C_BIT_FIELD (TREE_OPERAND (lhs, 1))
240 && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs, 1)))
241 {
242 tree field = TREE_OPERAND (lhs, 1);
243 tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
244 if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))
245 && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)))
246 bitpos = (tree_to_uhwi (DECL_FIELD_OFFSET (field))
247 - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT;
248 else
249 bitpos = 0;
250 bitpos += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
251 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
252 gcc_assert (tree_fits_shwi_p (DECL_SIZE (field)));
253 bitsize = tree_to_shwi (DECL_SIZE (field));
254 blhs = lhs;
255 type = TREE_TYPE (repr);
256 lhs = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs, 0),
257 repr, TREE_OPERAND (lhs, 2));
258 }
259
260 /* Take and save the address of the lhs. From then on we'll reference it
261 via indirection. */
262 addr = build_unary_op (loc, ADDR_EXPR, lhs, false);
263 if (addr == error_mark_node)
264 return error_mark_node;
265 if (!test)
266 addr = save_expr (addr);
267 if (!test
268 && TREE_CODE (addr) != SAVE_EXPR
269 && (TREE_CODE (addr) != ADDR_EXPR
270 || !VAR_P (TREE_OPERAND (addr, 0))))
271 {
272 /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
273 it even after unsharing function body. */
274 tree var = create_tmp_var_raw (TREE_TYPE (addr));
275 DECL_CONTEXT (var) = current_function_decl;
276 addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
277 }
278 tree orig_lhs = lhs;
279 lhs = build_indirect_ref (loc, addr, RO_NULL);
280 tree new_lhs = lhs;
281
282 if (code == OMP_ATOMIC_READ)
283 {
284 x = build1 (OMP_ATOMIC_READ, type, addr);
285 SET_EXPR_LOCATION (x, loc);
286 OMP_ATOMIC_MEMORY_ORDER (x) = memory_order;
287 if (blhs)
288 x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
289 bitsize_int (bitsize), bitsize_int (bitpos));
290 return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
291 loc, x, NULL_TREE);
292 }
293
294 /* There are lots of warnings, errors, and conversions that need to happen
295 in the course of interpreting a statement. Use the normal mechanisms
296 to do this, and then take it apart again. */
297 if (blhs)
298 {
299 lhs = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), lhs,
300 bitsize_int (bitsize), bitsize_int (bitpos));
301 if (swapped)
302 rhs = build_binary_op (loc, opcode, rhs, lhs, true);
303 else if (opcode != NOP_EXPR)
304 rhs = build_binary_op (loc, opcode, lhs, rhs, true);
305 opcode = NOP_EXPR;
306 }
307 else if (swapped)
308 {
309 rhs = build_binary_op (loc, opcode, rhs, lhs, true);
310 opcode = NOP_EXPR;
311 }
312 bool save = in_late_binary_op;
313 in_late_binary_op = true;
314 x = build_modify_expr (loc, blhs ? blhs : lhs, NULL_TREE, opcode,
315 loc, rhs, NULL_TREE);
316 in_late_binary_op = save;
317 if (x == error_mark_node)
318 return error_mark_node;
319 if (TREE_CODE (x) == COMPOUND_EXPR)
320 {
321 pre = TREE_OPERAND (x, 0);
322 gcc_assert (TREE_CODE (pre) == SAVE_EXPR || tree_invariant_p (pre));
323 x = TREE_OPERAND (x, 1);
324 }
325 gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
326 rhs = TREE_OPERAND (x, 1);
327
328 if (blhs)
329 rhs = build3_loc (loc, BIT_INSERT_EXPR, type, new_lhs,
330 rhs, bitsize_int (bitpos));
331
332 /* Punt the actual generation of atomic operations to common code. */
333 if (code == OMP_ATOMIC)
334 type = void_type_node;
335 x = build2 (code, type, addr, rhs);
336 SET_EXPR_LOCATION (x, loc);
337 OMP_ATOMIC_MEMORY_ORDER (x) = memory_order;
338
339 /* Generally it is hard to prove rhs1 and lhs are the same memory
340 location, just diagnose different variables. */
341 if (rhs1
342 && VAR_P (rhs1)
343 && VAR_P (orig_lhs)
344 && rhs1 != orig_lhs
345 && !test)
346 {
347 if (code == OMP_ATOMIC)
348 error_at (loc, "%<#pragma omp atomic update%> uses two different "
349 "variables for memory");
350 else
351 error_at (loc, "%<#pragma omp atomic capture%> uses two different "
352 "variables for memory");
353 return error_mark_node;
354 }
355
356 if (lhs1
357 && lhs1 != orig_lhs
358 && TREE_CODE (lhs1) == COMPONENT_REF
359 && TREE_CODE (TREE_OPERAND (lhs1, 1)) == FIELD_DECL
360 && DECL_C_BIT_FIELD (TREE_OPERAND (lhs1, 1))
361 && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs1, 1)))
362 {
363 tree field = TREE_OPERAND (lhs1, 1);
364 tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
365 lhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs1, 0),
366 repr, TREE_OPERAND (lhs1, 2));
367 }
368 if (rhs1
369 && rhs1 != orig_lhs
370 && TREE_CODE (rhs1) == COMPONENT_REF
371 && TREE_CODE (TREE_OPERAND (rhs1, 1)) == FIELD_DECL
372 && DECL_C_BIT_FIELD (TREE_OPERAND (rhs1, 1))
373 && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (rhs1, 1)))
374 {
375 tree field = TREE_OPERAND (rhs1, 1);
376 tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
377 rhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (rhs1, 0),
378 repr, TREE_OPERAND (rhs1, 2));
379 }
380
381 if (code != OMP_ATOMIC)
382 {
383 /* Generally it is hard to prove lhs1 and lhs are the same memory
384 location, just diagnose different variables. */
385 if (lhs1 && VAR_P (lhs1) && VAR_P (orig_lhs))
386 {
387 if (lhs1 != orig_lhs && !test)
388 {
389 error_at (loc, "%<#pragma omp atomic capture%> uses two "
390 "different variables for memory");
391 return error_mark_node;
392 }
393 }
394 if (blhs)
395 {
396 x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
397 bitsize_int (bitsize), bitsize_int (bitpos));
398 type = TREE_TYPE (blhs);
399 }
400 x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
401 loc, x, NULL_TREE);
402 if (rhs1 && rhs1 != orig_lhs)
403 {
404 tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
405 if (rhs1addr == error_mark_node)
406 return error_mark_node;
407 x = omit_one_operand_loc (loc, type, x, rhs1addr);
408 }
409 if (lhs1 && lhs1 != orig_lhs)
410 {
411 tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, false);
412 if (lhs1addr == error_mark_node)
413 return error_mark_node;
414 if (code == OMP_ATOMIC_CAPTURE_OLD)
415 x = omit_one_operand_loc (loc, type, x, lhs1addr);
416 else
417 {
418 if (!test)
419 x = save_expr (x);
420 x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
421 }
422 }
423 }
424 else if (rhs1 && rhs1 != orig_lhs)
425 {
426 tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
427 if (rhs1addr == error_mark_node)
428 return error_mark_node;
429 x = omit_one_operand_loc (loc, type, x, rhs1addr);
430 }
431
432 if (pre)
433 x = omit_one_operand_loc (loc, type, x, pre);
434 return x;
435 }
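/* Illustrative examples (not from the sources) of the source forms the
   routine above handles, assuming OpenMP 4.5/5.0 atomic syntax:

       #pragma omp atomic update          // CODE == OMP_ATOMIC
       x += expr;

       #pragma omp atomic read            // CODE == OMP_ATOMIC_READ
       v = x;

       #pragma omp atomic capture         // CODE == OMP_ATOMIC_CAPTURE_NEW
       v = x += expr;                     // v receives the new value of x

   LHS and RHS correspond to x and expr, V to the capture variable, and
   LHS1/RHS1 to the additional occurrences of x in capture forms, which
   are checked above for naming the same variable.  */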
436
437
438 /* Return true if TYPE is the implementation's omp_depend_t. */
439
440 bool
441 c_omp_depend_t_p (tree type)
442 {
443 type = TYPE_MAIN_VARIANT (type);
444 return (TREE_CODE (type) == RECORD_TYPE
445 && TYPE_NAME (type)
446 && ((TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
447 ? DECL_NAME (TYPE_NAME (type)) : TYPE_NAME (type))
448 == get_identifier ("omp_depend_t"))
449 && (!TYPE_CONTEXT (type)
450 || TREE_CODE (TYPE_CONTEXT (type)) == TRANSLATION_UNIT_DECL)
451 && COMPLETE_TYPE_P (type)
452 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
453 && !compare_tree_int (TYPE_SIZE (type),
454 2 * tree_to_uhwi (TYPE_SIZE (ptr_type_node))));
455 }
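/* For reference, the layout this predicate accepts corresponds to a
   declaration along the lines of the following (a sketch only; the
   member name is made up here and the authoritative definition is the
   installed <omp.h>):

       typedef struct omp_depend_t {
	 char __data[2 * sizeof (void *)];
       } omp_depend_t;

   i.e. a complete file-scope struct named omp_depend_t whose size is
   exactly twice that of a pointer.  */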
456
457
458 /* Complete a #pragma omp depobj construct. LOC is the location of the
459 #pragma. */
460
461 void
462 c_finish_omp_depobj (location_t loc, tree depobj,
463 enum omp_clause_depend_kind kind, tree clause)
464 {
465 tree t = NULL_TREE;
466 if (!error_operand_p (depobj))
467 {
468 if (!c_omp_depend_t_p (TREE_TYPE (depobj)))
469 {
470 error_at (EXPR_LOC_OR_LOC (depobj, loc),
471 "type of %<depobj%> expression is not %<omp_depend_t%>");
472 depobj = error_mark_node;
473 }
474 else if (TYPE_READONLY (TREE_TYPE (depobj)))
475 {
476 error_at (EXPR_LOC_OR_LOC (depobj, loc),
477 "%<const%> qualified %<depobj%> expression");
478 depobj = error_mark_node;
479 }
480 }
481 else
482 depobj = error_mark_node;
483
484 if (clause == error_mark_node)
485 return;
486
487 if (clause)
488 {
489 gcc_assert (TREE_CODE (clause) == OMP_CLAUSE
490 && OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_DEPEND);
491 if (OMP_CLAUSE_CHAIN (clause))
492 error_at (OMP_CLAUSE_LOCATION (clause),
493 "more than one locator in %<depend%> clause on %<depobj%> "
494 "construct");
495 switch (OMP_CLAUSE_DEPEND_KIND (clause))
496 {
497 case OMP_CLAUSE_DEPEND_DEPOBJ:
498 error_at (OMP_CLAUSE_LOCATION (clause),
499 "%<depobj%> dependence type specified in %<depend%> "
500 "clause on %<depobj%> construct");
501 return;
502 case OMP_CLAUSE_DEPEND_SOURCE:
503 case OMP_CLAUSE_DEPEND_SINK:
504 error_at (OMP_CLAUSE_LOCATION (clause),
505 "%<depend(%s)%> is only allowed in %<omp ordered%>",
506 OMP_CLAUSE_DEPEND_KIND (clause) == OMP_CLAUSE_DEPEND_SOURCE
507 ? "source" : "sink");
508 return;
509 case OMP_CLAUSE_DEPEND_IN:
510 case OMP_CLAUSE_DEPEND_OUT:
511 case OMP_CLAUSE_DEPEND_INOUT:
512 case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
513 kind = OMP_CLAUSE_DEPEND_KIND (clause);
514 t = OMP_CLAUSE_DECL (clause);
515 gcc_assert (t);
516 if (TREE_CODE (t) == TREE_LIST
517 && TREE_PURPOSE (t)
518 && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
519 {
520 error_at (OMP_CLAUSE_LOCATION (clause),
521 "%<iterator%> modifier may not be specified on "
522 "%<depobj%> construct");
523 return;
524 }
525 if (TREE_CODE (t) == COMPOUND_EXPR)
526 {
527 tree t1 = build_fold_addr_expr (TREE_OPERAND (t, 1));
528 t = build2 (COMPOUND_EXPR, TREE_TYPE (t1), TREE_OPERAND (t, 0),
529 t1);
530 }
531 else
532 t = build_fold_addr_expr (t);
533 break;
534 default:
535 gcc_unreachable ();
536 }
537 }
538 else
539 gcc_assert (kind != OMP_CLAUSE_DEPEND_SOURCE);
540
541 if (depobj == error_mark_node)
542 return;
543
544 depobj = build_fold_addr_expr_loc (EXPR_LOC_OR_LOC (depobj, loc), depobj);
545 tree dtype
546 = build_pointer_type_for_mode (ptr_type_node, TYPE_MODE (ptr_type_node),
547 true);
548 depobj = fold_convert (dtype, depobj);
549 tree r;
550 if (clause)
551 {
552 depobj = save_expr (depobj);
553 r = build_indirect_ref (loc, depobj, RO_UNARY_STAR);
554 add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t));
555 }
556 int k;
557 switch (kind)
558 {
559 case OMP_CLAUSE_DEPEND_IN:
560 k = GOMP_DEPEND_IN;
561 break;
562 case OMP_CLAUSE_DEPEND_OUT:
563 k = GOMP_DEPEND_OUT;
564 break;
565 case OMP_CLAUSE_DEPEND_INOUT:
566 k = GOMP_DEPEND_INOUT;
567 break;
568 case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
569 k = GOMP_DEPEND_MUTEXINOUTSET;
570 break;
571 case OMP_CLAUSE_DEPEND_LAST:
572 k = -1;
573 break;
574 default:
575 gcc_unreachable ();
576 }
577 t = build_int_cst (ptr_type_node, k);
578 depobj = build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (depobj), depobj,
579 TYPE_SIZE_UNIT (ptr_type_node));
580 r = build_indirect_ref (loc, depobj, RO_UNARY_STAR);
581 add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t));
582 }
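/* Illustrative usage (not from the sources) of the construct finalized
   above, assuming OpenMP 5.0 depobj syntax:

       omp_depend_t obj;
       #pragma omp depobj (obj) depend (inout: x)   // initialize
       #pragma omp depobj (obj) update (in)         // change dependence type
       #pragma omp depobj (obj) destroy             // invalidate

   For the depend form, the address of the locator is stored in the
   first pointer-sized slot of OBJ; the GOMP_DEPEND_* kind (or -1 for
   destroy) is always stored in the second slot.  */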
583
584
585 /* Complete a #pragma omp flush construct. We don't do anything with
586 the variable list that the syntax allows. LOC is the location of
587 the #pragma. */
588
589 void
590 c_finish_omp_flush (location_t loc, int mo)
591 {
592 tree x;
593
594 if (mo == MEMMODEL_LAST)
595 {
596 x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
597 x = build_call_expr_loc (loc, x, 0);
598 }
599 else
600 {
601 x = builtin_decl_explicit (BUILT_IN_ATOMIC_THREAD_FENCE);
602 x = build_call_expr_loc (loc, x, 1,
603 build_int_cst (integer_type_node, mo));
604 }
605 add_stmt (x);
606 }
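/* Short illustration (not from the sources) of the mapping above,
   assuming the OpenMP 5.0 flush memory-order modifiers:

       #pragma omp flush              // MO == MEMMODEL_LAST:
				      //   __sync_synchronize ()
       #pragma omp flush acq_rel      // __atomic_thread_fence (acq_rel)

   A flush with a variable list uses the same full fence; the list
   itself is ignored here, as noted above.  */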
607
608
609 /* Check and canonicalize OMP_FOR increment expression.
610 Helper function for c_finish_omp_for. */
611
612 static tree
613 check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
614 {
615 tree t;
616
617 if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
618 || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
619 return error_mark_node;
620
621 if (exp == decl)
622 return build_int_cst (TREE_TYPE (exp), 0);
623
624 switch (TREE_CODE (exp))
625 {
626 CASE_CONVERT:
627 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
628 if (t != error_mark_node)
629 return fold_convert_loc (loc, TREE_TYPE (exp), t);
630 break;
631 case MINUS_EXPR:
632 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
633 if (t != error_mark_node)
634 return fold_build2_loc (loc, MINUS_EXPR,
635 TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
636 break;
637 case PLUS_EXPR:
638 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
639 if (t != error_mark_node)
640 return fold_build2_loc (loc, PLUS_EXPR,
641 TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
642 t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
643 if (t != error_mark_node)
644 return fold_build2_loc (loc, PLUS_EXPR,
645 TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
646 break;
647 case COMPOUND_EXPR:
648 {
649 /* cp_build_modify_expr forces preevaluation of the RHS to make
650 sure that it is evaluated before the lvalue-rvalue conversion
651 is applied to the LHS. Reconstruct the original expression. */
652 tree op0 = TREE_OPERAND (exp, 0);
653 if (TREE_CODE (op0) == TARGET_EXPR
654 && !VOID_TYPE_P (TREE_TYPE (op0)))
655 {
656 tree op1 = TREE_OPERAND (exp, 1);
657 tree temp = TARGET_EXPR_SLOT (op0);
658 if (BINARY_CLASS_P (op1)
659 && TREE_OPERAND (op1, 1) == temp)
660 {
661 op1 = copy_node (op1);
662 TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
663 return check_omp_for_incr_expr (loc, op1, decl);
664 }
665 }
666 break;
667 }
668 default:
669 break;
670 }
671
672 return error_mark_node;
673 }
674
675 /* If the OMP_FOR increment expression in INCR is of pointer type,
676 canonicalize it into an expression handled by gimplify_omp_for()
677 and return it. DECL is the iteration variable. */
678
679 static tree
680 c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
681 {
682 if (POINTER_TYPE_P (TREE_TYPE (decl))
683 && TREE_OPERAND (incr, 1))
684 {
685 tree t = fold_convert_loc (loc,
686 sizetype, TREE_OPERAND (incr, 1));
687
688 if (TREE_CODE (incr) == POSTDECREMENT_EXPR
689 || TREE_CODE (incr) == PREDECREMENT_EXPR)
690 t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
691 t = fold_build_pointer_plus (decl, t);
692 incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
693 }
694 return incr;
695 }
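/* Example (not from the sources) of the canonicalization performed
   above for a pointer iteration variable:

       for (int *p = a; p < a + n; p++)

   arrives here with INCR being a POSTINCREMENT_EXPR whose second
   operand is the element size; it leaves as the MODIFY_EXPR

       p = p p+ sizeof (int)          (a POINTER_PLUS_EXPR of DECL)

   which is the canonical shape gimplify_omp_for expects.  */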
696
697 /* Validate and generate OMP_FOR.
698 DECLV is a vector of iteration variables, for each collapsed loop.
699
700 ORIG_DECLV, if non-NULL, is a vector with the original iteration
701 variables (prior to any transformations, by say, C++ iterators).
702
703 INITV, CONDV and INCRV are vectors containing initialization
704 expressions, controlling predicates and increment expressions.
705 BODY is the body of the loop, and PRE_BODY holds statements that go before
706 the loop. */
707
708 tree
709 c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
710 tree orig_declv, tree initv, tree condv, tree incrv,
711 tree body, tree pre_body, bool final_p)
712 {
713 location_t elocus;
714 bool fail = false;
715 int i;
716
717 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
718 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
719 gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
720 for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
721 {
722 tree decl = TREE_VEC_ELT (declv, i);
723 tree init = TREE_VEC_ELT (initv, i);
724 tree cond = TREE_VEC_ELT (condv, i);
725 tree incr = TREE_VEC_ELT (incrv, i);
726
727 elocus = locus;
728 if (EXPR_HAS_LOCATION (init))
729 elocus = EXPR_LOCATION (init);
730
731 /* Validate the iteration variable. */
732 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
733 && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
734 {
735 error_at (elocus, "invalid type for iteration variable %qE", decl);
736 fail = true;
737 }
738 else if (TYPE_ATOMIC (TREE_TYPE (decl)))
739 {
740 error_at (elocus, "%<_Atomic%> iteration variable %qE", decl);
741 fail = true;
742 /* _Atomic iterator confuses stuff too much, so we risk ICE
743 trying to diagnose it further. */
744 continue;
745 }
746
747 /* In the case of "for (int i = 0...)", init will be a decl. It should
748 have a DECL_INITIAL that we can turn into an assignment. */
749 if (init == decl)
750 {
751 elocus = DECL_SOURCE_LOCATION (decl);
752
753 init = DECL_INITIAL (decl);
754 if (init == NULL)
755 {
756 error_at (elocus, "%qE is not initialized", decl);
757 init = integer_zero_node;
758 fail = true;
759 }
760 DECL_INITIAL (decl) = NULL_TREE;
761
762 init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
763 /* FIXME diagnostics: This should
764 be the location of the INIT. */
765 elocus,
766 init,
767 NULL_TREE);
768 }
769 if (init != error_mark_node)
770 {
771 gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
772 gcc_assert (TREE_OPERAND (init, 0) == decl);
773 }
774
775 if (cond == NULL_TREE)
776 {
777 error_at (elocus, "missing controlling predicate");
778 fail = true;
779 }
780 else
781 {
782 bool cond_ok = false;
783
784 /* E.g. C sizeof (vla) could add COMPOUND_EXPRs with
785 evaluation of the vla VAR_DECL. We need to re-add
786 them to the non-decl operand. See PR45784. */
787 while (TREE_CODE (cond) == COMPOUND_EXPR)
788 cond = TREE_OPERAND (cond, 1);
789
790 if (EXPR_HAS_LOCATION (cond))
791 elocus = EXPR_LOCATION (cond);
792
793 if (TREE_CODE (cond) == LT_EXPR
794 || TREE_CODE (cond) == LE_EXPR
795 || TREE_CODE (cond) == GT_EXPR
796 || TREE_CODE (cond) == GE_EXPR
797 || TREE_CODE (cond) == NE_EXPR
798 || TREE_CODE (cond) == EQ_EXPR)
799 {
800 tree op0 = TREE_OPERAND (cond, 0);
801 tree op1 = TREE_OPERAND (cond, 1);
802
803 /* 2.5.1. The comparison in the condition is computed in
804 the type of DECL, otherwise the behavior is undefined.
805
806 For example:
807 long n; int i;
808 i < n;
809
810 according to ISO will be evaluated as:
811 (long)i < n;
812
813 We want to force:
814 i < (int)n; */
815 if (TREE_CODE (op0) == NOP_EXPR
816 && decl == TREE_OPERAND (op0, 0))
817 {
818 TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
819 TREE_OPERAND (cond, 1)
820 = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
821 TREE_OPERAND (cond, 1));
822 }
823 else if (TREE_CODE (op1) == NOP_EXPR
824 && decl == TREE_OPERAND (op1, 0))
825 {
826 TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
827 TREE_OPERAND (cond, 0)
828 = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
829 TREE_OPERAND (cond, 0));
830 }
831
832 if (decl == TREE_OPERAND (cond, 0))
833 cond_ok = true;
834 else if (decl == TREE_OPERAND (cond, 1))
835 {
836 TREE_SET_CODE (cond,
837 swap_tree_comparison (TREE_CODE (cond)));
838 TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
839 TREE_OPERAND (cond, 0) = decl;
840 cond_ok = true;
841 }
842
843 if (TREE_CODE (cond) == NE_EXPR
844 || TREE_CODE (cond) == EQ_EXPR)
845 {
846 if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
847 {
848 if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR)
849 cond_ok = false;
850 }
851 else if (operand_equal_p (TREE_OPERAND (cond, 1),
852 TYPE_MIN_VALUE (TREE_TYPE (decl)),
853 0))
854 TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
855 ? GT_EXPR : LE_EXPR);
856 else if (operand_equal_p (TREE_OPERAND (cond, 1),
857 TYPE_MAX_VALUE (TREE_TYPE (decl)),
858 0))
859 TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
860 ? LT_EXPR : GE_EXPR);
861 else if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR)
862 cond_ok = false;
863 }
864
865 if (cond_ok && TREE_VEC_ELT (condv, i) != cond)
866 {
867 tree ce = NULL_TREE, *pce = &ce;
868 tree type = TREE_TYPE (TREE_OPERAND (cond, 1));
869 for (tree c = TREE_VEC_ELT (condv, i); c != cond;
870 c = TREE_OPERAND (c, 1))
871 {
872 *pce = build2 (COMPOUND_EXPR, type, TREE_OPERAND (c, 0),
873 TREE_OPERAND (cond, 1));
874 pce = &TREE_OPERAND (*pce, 1);
875 }
876 TREE_OPERAND (cond, 1) = ce;
877 TREE_VEC_ELT (condv, i) = cond;
878 }
879 }
880
881 if (!cond_ok)
882 {
883 error_at (elocus, "invalid controlling predicate");
884 fail = true;
885 }
886 }
887
888 if (incr == NULL_TREE)
889 {
890 error_at (elocus, "missing increment expression");
891 fail = true;
892 }
893 else
894 {
895 bool incr_ok = false;
896
897 if (EXPR_HAS_LOCATION (incr))
898 elocus = EXPR_LOCATION (incr);
899
900 /* Check all the valid increment expressions: v++, v--, ++v, --v,
901 v = v + incr, v = incr + v and v = v - incr. */
902 switch (TREE_CODE (incr))
903 {
904 case POSTINCREMENT_EXPR:
905 case PREINCREMENT_EXPR:
906 case POSTDECREMENT_EXPR:
907 case PREDECREMENT_EXPR:
908 if (TREE_OPERAND (incr, 0) != decl)
909 break;
910
911 incr_ok = true;
912 if (!fail
913 && TREE_CODE (cond) == NE_EXPR
914 && TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE
915 && TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)))
916 && (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl))))
917 != INTEGER_CST))
918 {
919 /* For pointer to VLA, transform != into < or >
920 depending on whether incr is increment or decrement. */
921 if (TREE_CODE (incr) == PREINCREMENT_EXPR
922 || TREE_CODE (incr) == POSTINCREMENT_EXPR)
923 TREE_SET_CODE (cond, LT_EXPR);
924 else
925 TREE_SET_CODE (cond, GT_EXPR);
926 }
927 incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
928 break;
929
930 case COMPOUND_EXPR:
931 if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
932 || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
933 break;
934 incr = TREE_OPERAND (incr, 1);
935 /* FALLTHRU */
936 case MODIFY_EXPR:
937 if (TREE_OPERAND (incr, 0) != decl)
938 break;
939 if (TREE_OPERAND (incr, 1) == decl)
940 break;
941 if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
942 && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
943 || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
944 incr_ok = true;
945 else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
946 || (TREE_CODE (TREE_OPERAND (incr, 1))
947 == POINTER_PLUS_EXPR))
948 && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
949 incr_ok = true;
950 else
951 {
952 tree t = check_omp_for_incr_expr (elocus,
953 TREE_OPERAND (incr, 1),
954 decl);
955 if (t != error_mark_node)
956 {
957 incr_ok = true;
958 t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
959 incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
960 }
961 }
962 if (!fail
963 && incr_ok
964 && TREE_CODE (cond) == NE_EXPR)
965 {
966 tree i = TREE_OPERAND (incr, 1);
967 i = TREE_OPERAND (i, TREE_OPERAND (i, 0) == decl);
968 i = c_fully_fold (i, false, NULL);
969 if (!final_p
970 && TREE_CODE (i) != INTEGER_CST)
971 ;
972 else if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
973 {
974 tree unit
975 = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
976 if (unit)
977 {
978 enum tree_code ccode = GT_EXPR;
979 unit = c_fully_fold (unit, false, NULL);
980 i = fold_convert (TREE_TYPE (unit), i);
981 if (operand_equal_p (unit, i, 0))
982 ccode = LT_EXPR;
983 if (ccode == GT_EXPR)
984 {
985 i = fold_unary (NEGATE_EXPR, TREE_TYPE (i), i);
986 if (i == NULL_TREE
987 || !operand_equal_p (unit, i, 0))
988 {
989 error_at (elocus,
990 "increment is not constant 1 or "
991 "-1 for %<!=%> condition");
992 fail = true;
993 }
994 }
995 if (TREE_CODE (unit) != INTEGER_CST)
996 /* For pointer to VLA, transform != into < or >
997 depending on whether the pointer is
998 incremented or decremented in each
999 iteration. */
1000 TREE_SET_CODE (cond, ccode);
1001 }
1002 }
1003 else
1004 {
1005 if (!integer_onep (i) && !integer_minus_onep (i))
1006 {
1007 error_at (elocus,
1008 "increment is not constant 1 or -1 for"
1009 " %<!=%> condition");
1010 fail = true;
1011 }
1012 }
1013 }
1014 break;
1015
1016 default:
1017 break;
1018 }
1019 if (!incr_ok)
1020 {
1021 error_at (elocus, "invalid increment expression");
1022 fail = true;
1023 }
1024 }
1025
1026 TREE_VEC_ELT (initv, i) = init;
1027 TREE_VEC_ELT (incrv, i) = incr;
1028 }
1029
1030 if (fail)
1031 return NULL;
1032 else
1033 {
1034 tree t = make_node (code);
1035
1036 TREE_TYPE (t) = void_type_node;
1037 OMP_FOR_INIT (t) = initv;
1038 OMP_FOR_COND (t) = condv;
1039 OMP_FOR_INCR (t) = incrv;
1040 OMP_FOR_BODY (t) = body;
1041 OMP_FOR_PRE_BODY (t) = pre_body;
1042 OMP_FOR_ORIG_DECLS (t) = orig_declv;
1043
1044 SET_EXPR_LOCATION (t, locus);
1045 return t;
1046 }
1047 }
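/* Minimal example (not from the sources) of a loop nest that passes the
   checks above, assuming OpenMP canonical loop form:

       #pragma omp for collapse(2)
       for (i = 0; i < n; i++)
	 for (j = 0; j < m; j++)
	   ...

   DECLV, INITV, CONDV and INCRV each have two elements here; every
   condition is rewritten so the iteration variable is operand 0, and a
   != condition is in general only kept when the increment is 1 or -1
   (or is rewritten to < or > for pointers to VLAs).  */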
1048
1049 /* Type for passing data in between c_omp_check_loop_iv and
1050 c_omp_check_loop_iv_r. */
1051
1052 struct c_omp_check_loop_iv_data
1053 {
1054 tree declv;
1055 bool fail;
1056 bool maybe_nonrect;
1057 location_t stmt_loc;
1058 location_t expr_loc;
1059 int kind;
1060 int idx;
1061 walk_tree_lh lh;
1062 hash_set<tree> *ppset;
1063 };
1064
1065 /* Return -1 if DECL is not a loop iterator in loop nest D, otherwise
1066 return the index of the loop in which it is an iterator.
1067 Return TREE_VEC_LENGTH (d->declv) if it is a C++ range for iterator. */
1068
1069 static int
1070 c_omp_is_loop_iterator (tree decl, struct c_omp_check_loop_iv_data *d)
1071 {
1072 for (int i = 0; i < TREE_VEC_LENGTH (d->declv); i++)
1073 if (decl == TREE_VEC_ELT (d->declv, i)
1074 || (TREE_CODE (TREE_VEC_ELT (d->declv, i)) == TREE_LIST
1075 && decl == TREE_PURPOSE (TREE_VEC_ELT (d->declv, i))))
1076 return i;
1077 else if (TREE_CODE (TREE_VEC_ELT (d->declv, i)) == TREE_LIST
1078 && TREE_CHAIN (TREE_VEC_ELT (d->declv, i))
1079 && (TREE_CODE (TREE_CHAIN (TREE_VEC_ELT (d->declv, i)))
1080 == TREE_VEC)
1081 && decl == TREE_VEC_ELT (TREE_CHAIN (TREE_VEC_ELT (d->declv,
1082 i)), 2))
1083 return TREE_VEC_LENGTH (d->declv);
1084 return -1;
1085 }
1086
1087 /* Helper function called via walk_tree, to diagnose uses
1088 of associated loop IVs inside of lb, b and incr expressions
1089 of OpenMP loops. */
1090
1091 static tree
1092 c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data)
1093 {
1094 struct c_omp_check_loop_iv_data *d
1095 = (struct c_omp_check_loop_iv_data *) data;
1096 if (DECL_P (*tp))
1097 {
1098 int idx = c_omp_is_loop_iterator (*tp, d);
1099 if (idx == -1)
1100 return NULL_TREE;
1101
1102 if ((d->kind & 4) && idx < d->idx)
1103 {
1104 d->maybe_nonrect = true;
1105 return NULL_TREE;
1106 }
1107
1108 if (d->ppset->add (*tp))
1109 return NULL_TREE;
1110
1111 location_t loc = d->expr_loc;
1112 if (loc == UNKNOWN_LOCATION)
1113 loc = d->stmt_loc;
1114
1115 switch (d->kind & 3)
1116 {
1117 case 0:
1118 error_at (loc, "initializer expression refers to "
1119 "iteration variable %qD", *tp);
1120 break;
1121 case 1:
1122 error_at (loc, "condition expression refers to "
1123 "iteration variable %qD", *tp);
1124 break;
1125 case 2:
1126 error_at (loc, "increment expression refers to "
1127 "iteration variable %qD", *tp);
1128 break;
1129 }
1130 d->fail = true;
1131 }
1132 else if (d->ppset->add (*tp))
1133 *walk_subtrees = 0;
1134 /* Don't walk dtors added by C++ wrap_cleanups_r. */
1135 else if (TREE_CODE (*tp) == TRY_CATCH_EXPR
1136 && TRY_CATCH_IS_CLEANUP (*tp))
1137 {
1138 *walk_subtrees = 0;
1139 return walk_tree_1 (&TREE_OPERAND (*tp, 0), c_omp_check_loop_iv_r, data,
1140 NULL, d->lh);
1141 }
1142
1143 return NULL_TREE;
1144 }
1145
1146 /* Check the allowed expressions for non-rectangular loop nest lb and b
1147 expressions. Return the outer var decl referenced in the expression. */
1148
1149 static tree
1150 c_omp_check_nonrect_loop_iv (tree *tp, struct c_omp_check_loop_iv_data *d,
1151 walk_tree_lh lh)
1152 {
1153 d->maybe_nonrect = false;
1154 if (d->fail)
1155 return NULL_TREE;
1156
1157 hash_set<tree> pset;
1158 hash_set<tree> *ppset = d->ppset;
1159 d->ppset = &pset;
1160
1161 tree t = *tp;
1162 if (TREE_CODE (t) == TREE_VEC
1163 && TREE_VEC_LENGTH (t) == 3
1164 && DECL_P (TREE_VEC_ELT (t, 0))
1165 && c_omp_is_loop_iterator (TREE_VEC_ELT (t, 0), d) >= 0)
1166 {
1167 d->kind &= 3;
1168 walk_tree_1 (&TREE_VEC_ELT (t, 1), c_omp_check_loop_iv_r, d, NULL, lh);
1169 walk_tree_1 (&TREE_VEC_ELT (t, 2), c_omp_check_loop_iv_r, d, NULL, lh);
1170 d->ppset = ppset;
1171 return d->fail ? NULL_TREE : TREE_VEC_ELT (t, 0);
1172 }
1173
1174 while (CONVERT_EXPR_P (t))
1175 t = TREE_OPERAND (t, 0);
1176
1177 tree a1 = t, a2 = integer_zero_node;
1178 bool neg_a1 = false, neg_a2 = false;
1179 switch (TREE_CODE (t))
1180 {
1181 case PLUS_EXPR:
1182 case MINUS_EXPR:
1183 a1 = TREE_OPERAND (t, 0);
1184 a2 = TREE_OPERAND (t, 1);
1185 while (CONVERT_EXPR_P (a1))
1186 a1 = TREE_OPERAND (a1, 0);
1187 while (CONVERT_EXPR_P (a2))
1188 a2 = TREE_OPERAND (a2, 0);
1189 if (DECL_P (a1) && c_omp_is_loop_iterator (a1, d) >= 0)
1190 {
1191 a2 = TREE_OPERAND (t, 1);
1192 if (TREE_CODE (t) == MINUS_EXPR)
1193 neg_a2 = true;
1194 t = a1;
1195 break;
1196 }
1197 if (DECL_P (a2) && c_omp_is_loop_iterator (a2, d) >= 0)
1198 {
1199 a1 = TREE_OPERAND (t, 0);
1200 if (TREE_CODE (t) == MINUS_EXPR)
1201 neg_a1 = true;
1202 t = a2;
1203 a2 = a1;
1204 break;
1205 }
1206 if (TREE_CODE (a1) == MULT_EXPR && TREE_CODE (a2) == MULT_EXPR)
1207 {
1208 tree o1 = TREE_OPERAND (a1, 0);
1209 tree o2 = TREE_OPERAND (a1, 1);
1210 while (CONVERT_EXPR_P (o1))
1211 o1 = TREE_OPERAND (o1, 0);
1212 while (CONVERT_EXPR_P (o2))
1213 o2 = TREE_OPERAND (o2, 0);
1214 if ((DECL_P (o1) && c_omp_is_loop_iterator (o1, d) >= 0)
1215 || (DECL_P (o2) && c_omp_is_loop_iterator (o2, d) >= 0))
1216 {
1217 a2 = TREE_OPERAND (t, 1);
1218 if (TREE_CODE (t) == MINUS_EXPR)
1219 neg_a2 = true;
1220 t = a1;
1221 break;
1222 }
1223 }
1224 if (TREE_CODE (a2) == MULT_EXPR)
1225 {
1226 a1 = TREE_OPERAND (t, 0);
1227 if (TREE_CODE (t) == MINUS_EXPR)
1228 neg_a1 = true;
1229 t = a2;
1230 a2 = a1;
1231 break;
1232 }
1233 if (TREE_CODE (a1) == MULT_EXPR)
1234 {
1235 a2 = TREE_OPERAND (t, 1);
1236 if (TREE_CODE (t) == MINUS_EXPR)
1237 neg_a2 = true;
1238 t = a1;
1239 break;
1240 }
1241 a2 = integer_zero_node;
1242 break;
1243 default:
1244 break;
1245 }
1246
1247 a1 = integer_one_node;
1248 if (TREE_CODE (t) == MULT_EXPR)
1249 {
1250 tree o1 = TREE_OPERAND (t, 0);
1251 tree o2 = TREE_OPERAND (t, 1);
1252 while (CONVERT_EXPR_P (o1))
1253 o1 = TREE_OPERAND (o1, 0);
1254 while (CONVERT_EXPR_P (o2))
1255 o2 = TREE_OPERAND (o2, 0);
1256 if (DECL_P (o1) && c_omp_is_loop_iterator (o1, d) >= 0)
1257 {
1258 a1 = TREE_OPERAND (t, 1);
1259 t = o1;
1260 }
1261 else if (DECL_P (o2) && c_omp_is_loop_iterator (o2, d) >= 0)
1262 {
1263 a1 = TREE_OPERAND (t, 0);
1264 t = o2;
1265 }
1266 }
1267
1268 d->kind &= 3;
1269 tree ret = NULL_TREE;
1270 if (DECL_P (t) && c_omp_is_loop_iterator (t, d) >= 0)
1271 {
1272 location_t loc = d->expr_loc;
1273 if (loc == UNKNOWN_LOCATION)
1274 loc = d->stmt_loc;
1275 if (!lang_hooks.types_compatible_p (TREE_TYPE (*tp), TREE_TYPE (t)))
1276 {
1277 if (d->kind == 0)
1278 error_at (loc, "outer iteration variable %qD used in initializer"
1279 " expression has type other than %qT",
1280 t, TREE_TYPE (*tp));
1281 else
1282 error_at (loc, "outer iteration variable %qD used in condition"
1283 " expression has type other than %qT",
1284 t, TREE_TYPE (*tp));
1285 d->fail = true;
1286 }
1287 else if (!INTEGRAL_TYPE_P (TREE_TYPE (a1)))
1288 {
1289 error_at (loc, "outer iteration variable %qD multiplier expression"
1290 " %qE is not integral", t, a1);
1291 d->fail = true;
1292 }
1293 else if (!INTEGRAL_TYPE_P (TREE_TYPE (a2)))
1294 {
1295 error_at (loc, "outer iteration variable %qD addend expression"
1296 " %qE is not integral", t, a2);
1297 d->fail = true;
1298 }
1299 else
1300 {
1301 walk_tree_1 (&a1, c_omp_check_loop_iv_r, d, NULL, lh);
1302 walk_tree_1 (&a2, c_omp_check_loop_iv_r, d, NULL, lh);
1303 }
1304 if (!d->fail)
1305 {
1306 a1 = fold_convert (TREE_TYPE (*tp), a1);
1307 a2 = fold_convert (TREE_TYPE (*tp), a2);
1308 if (neg_a1)
1309 a1 = fold_build1 (NEGATE_EXPR, TREE_TYPE (a1), a1);
1310 if (neg_a2)
1311 a2 = fold_build1 (NEGATE_EXPR, TREE_TYPE (a2), a2);
1312 ret = t;
1313 *tp = make_tree_vec (3);
1314 TREE_VEC_ELT (*tp, 0) = t;
1315 TREE_VEC_ELT (*tp, 1) = a1;
1316 TREE_VEC_ELT (*tp, 2) = a2;
1317 }
1318 }
1319 else
1320 walk_tree_1 (&t, c_omp_check_loop_iv_r, d, NULL, lh);
1321
1322 d->ppset = ppset;
1323 return ret;
1324 }
1325
1326 /* Diagnose invalid references to loop iterators in lb, b and incr
1327 expressions. */
1328
1329 bool
1330 c_omp_check_loop_iv (tree stmt, tree declv, walk_tree_lh lh)
1331 {
1332 hash_set<tree> pset;
1333 struct c_omp_check_loop_iv_data data;
1334 int i;
1335
1336 data.declv = declv;
1337 data.fail = false;
1338 data.maybe_nonrect = false;
1339 data.stmt_loc = EXPR_LOCATION (stmt);
1340 data.lh = lh;
1341 data.ppset = &pset;
1342 for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++)
1343 {
1344 tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i);
1345 gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
1346 tree decl = TREE_OPERAND (init, 0);
1347 tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i);
1348 gcc_assert (COMPARISON_CLASS_P (cond));
1349 gcc_assert (TREE_OPERAND (cond, 0) == decl);
1350 tree incr = TREE_VEC_ELT (OMP_FOR_INCR (stmt), i);
1351 data.expr_loc = EXPR_LOCATION (TREE_OPERAND (init, 1));
1352 tree vec_outer1 = NULL_TREE, vec_outer2 = NULL_TREE;
1353 int kind = 0;
1354 if (i > 0
1355 && (unsigned) c_omp_is_loop_iterator (decl, &data) < (unsigned) i)
1356 {
1357 location_t loc = data.expr_loc;
1358 if (loc == UNKNOWN_LOCATION)
1359 loc = data.stmt_loc;
1360 error_at (loc, "the same loop iteration variables %qD used in "
1361 "multiple associated loops", decl);
1362 data.fail = true;
1363 }
1364 /* Handle non-rectangular loop nests. */
1365 if (TREE_CODE (stmt) != OACC_LOOP
1366 && (TREE_CODE (TREE_OPERAND (init, 1)) == TREE_VEC
1367 || INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (init, 1))))
1368 && i > 0)
1369 kind = 4;
1370 data.kind = kind;
1371 data.idx = i;
1372 walk_tree_1 (&TREE_OPERAND (init, 1),
1373 c_omp_check_loop_iv_r, &data, NULL, lh);
1374 if (data.maybe_nonrect)
1375 vec_outer1 = c_omp_check_nonrect_loop_iv (&TREE_OPERAND (init, 1),
1376 &data, lh);
1377 /* Don't warn for C++ random access iterators here, the
1378 expression then involves the subtraction and always refers
1379 to the original value. The C++ FE needs to warn on those
1380 earlier. */
1381 if (decl == TREE_VEC_ELT (declv, i)
1382 || (TREE_CODE (TREE_VEC_ELT (declv, i)) == TREE_LIST
1383 && decl == TREE_PURPOSE (TREE_VEC_ELT (declv, i))))
1384 {
1385 data.expr_loc = EXPR_LOCATION (cond);
1386 data.kind = kind | 1;
1387 walk_tree_1 (&TREE_OPERAND (cond, 1),
1388 c_omp_check_loop_iv_r, &data, NULL, lh);
1389 if (data.maybe_nonrect)
1390 vec_outer2 = c_omp_check_nonrect_loop_iv (&TREE_OPERAND (cond, 1),
1391 &data, lh);
1392 }
1393 if (vec_outer1 && vec_outer2 && vec_outer1 != vec_outer2)
1394 {
1395 location_t loc = data.expr_loc;
1396 if (loc == UNKNOWN_LOCATION)
1397 loc = data.stmt_loc;
1398 error_at (loc, "two different outer iteration variables %qD and %qD"
1399 " used in a single loop", vec_outer1, vec_outer2);
1400 data.fail = true;
1401 }
1402 if (vec_outer1 || vec_outer2)
1403 OMP_FOR_NON_RECTANGULAR (stmt) = 1;
1404 if (TREE_CODE (incr) == MODIFY_EXPR)
1405 {
1406 gcc_assert (TREE_OPERAND (incr, 0) == decl);
1407 incr = TREE_OPERAND (incr, 1);
1408 data.kind = 2;
1409 if (TREE_CODE (incr) == PLUS_EXPR
1410 && TREE_OPERAND (incr, 1) == decl)
1411 {
1412 data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 0));
1413 walk_tree_1 (&TREE_OPERAND (incr, 0),
1414 c_omp_check_loop_iv_r, &data, NULL, lh);
1415 }
1416 else
1417 {
1418 data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 1));
1419 walk_tree_1 (&TREE_OPERAND (incr, 1),
1420 c_omp_check_loop_iv_r, &data, NULL, lh);
1421 }
1422 }
1423 }
1424 return !data.fail;
1425 }
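/* Illustrative cases (not from the sources) for the checking above,
   assuming OpenMP 5.0 non-rectangular loop rules:

       #pragma omp for collapse(2)
       for (i = 0; i < n; i++)
	 for (j = i; j < 2 * i + 8; j++)   // OK: bounds may use outer IV i
	   ...

       #pragma omp for collapse(2)
       for (i = 0; i < n; i++)
	 for (j = 0; j < n; j += i)        // error: increment refers to
	   ...                             // iteration variable i

   Accepted references to an outer iteration variable are rewritten into
   a TREE_VEC of the form { outer-var, multiplier, addend } and the loop
   is marked OMP_FOR_NON_RECTANGULAR.  */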
1426
1427 /* Similar, but allows checking the init or cond expressions individually. */
1428
1429 bool
1430 c_omp_check_loop_iv_exprs (location_t stmt_loc, tree declv, int i, tree decl,
1431 tree init, tree cond, walk_tree_lh lh)
1432 {
1433 hash_set<tree> pset;
1434 struct c_omp_check_loop_iv_data data;
1435
1436 data.declv = declv;
1437 data.fail = false;
1438 data.maybe_nonrect = false;
1439 data.stmt_loc = stmt_loc;
1440 data.lh = lh;
1441 data.ppset = &pset;
1442 data.idx = i;
1443 if (i > 0
1444 && (unsigned) c_omp_is_loop_iterator (decl, &data) < (unsigned) i)
1445 {
1446 error_at (stmt_loc, "the same loop iteration variables %qD used in "
1447 "multiple associated loops", decl);
1448 data.fail = true;
1449 }
1450 if (init)
1451 {
1452 data.expr_loc = EXPR_LOCATION (init);
1453 data.kind = 0;
1454 walk_tree_1 (&init,
1455 c_omp_check_loop_iv_r, &data, NULL, lh);
1456 }
1457 if (cond)
1458 {
1459 gcc_assert (COMPARISON_CLASS_P (cond));
1460 data.expr_loc = EXPR_LOCATION (init);
1461 data.kind = 1;
1462 if (TREE_OPERAND (cond, 0) == decl)
1463 walk_tree_1 (&TREE_OPERAND (cond, 1),
1464 c_omp_check_loop_iv_r, &data, NULL, lh);
1465 else
1466 walk_tree_1 (&TREE_OPERAND (cond, 0),
1467 c_omp_check_loop_iv_r, &data, NULL, lh);
1468 }
1469 return !data.fail;
1470 }
1471
1472 /* This function splits clauses for OpenACC combined loop
1473 constructs. OpenACC combined loop constructs are:
1474 #pragma acc kernels loop
1475 #pragma acc parallel loop */
1476
1477 tree
1478 c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses,
1479 bool is_parallel)
1480 {
1481 tree next, loop_clauses, nc;
1482
1483 loop_clauses = *not_loop_clauses = NULL_TREE;
1484 for (; clauses ; clauses = next)
1485 {
1486 next = OMP_CLAUSE_CHAIN (clauses);
1487
1488 switch (OMP_CLAUSE_CODE (clauses))
1489 {
1490 /* Loop clauses. */
1491 case OMP_CLAUSE_COLLAPSE:
1492 case OMP_CLAUSE_TILE:
1493 case OMP_CLAUSE_GANG:
1494 case OMP_CLAUSE_WORKER:
1495 case OMP_CLAUSE_VECTOR:
1496 case OMP_CLAUSE_AUTO:
1497 case OMP_CLAUSE_SEQ:
1498 case OMP_CLAUSE_INDEPENDENT:
1499 case OMP_CLAUSE_PRIVATE:
1500 OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
1501 loop_clauses = clauses;
1502 break;
1503
1504 /* Reductions must be duplicated on both constructs. */
1505 case OMP_CLAUSE_REDUCTION:
1506 if (is_parallel)
1507 {
1508 nc = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1509 OMP_CLAUSE_REDUCTION);
1510 OMP_CLAUSE_DECL (nc) = OMP_CLAUSE_DECL (clauses);
1511 OMP_CLAUSE_REDUCTION_CODE (nc)
1512 = OMP_CLAUSE_REDUCTION_CODE (clauses);
1513 OMP_CLAUSE_CHAIN (nc) = *not_loop_clauses;
1514 *not_loop_clauses = nc;
1515 }
1516
1517 OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
1518 loop_clauses = clauses;
1519 break;
1520
1521 /* Parallel/kernels clauses. */
1522 default:
1523 OMP_CLAUSE_CHAIN (clauses) = *not_loop_clauses;
1524 *not_loop_clauses = clauses;
1525 break;
1526 }
1527 }
1528
1529 return loop_clauses;
1530 }
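/* Small example (not from the sources) of the splitting above:

       #pragma acc parallel loop reduction(+:sum) collapse(2) num_gangs(8)

   collapse ends up on the loop construct, num_gangs on the enclosing
   parallel, and the reduction is duplicated on both, as combined
   OpenACC constructs require.  */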
1531
1532 /* This function attempts to split or duplicate clauses for OpenMP
1533 combined/composite constructs. Right now there are 30 different
1534 constructs. CODE is the innermost construct in the combined construct,
1535 and MASK allows to determine which constructs are combined together,
1536 as every construct has at least one clause that no other construct
1537 has (except for OMP_SECTIONS, but that can be only combined with parallel,
1538 and OMP_MASTER, which doesn't have any clauses at all).
1539 OpenMP combined/composite constructs are:
1540 #pragma omp distribute parallel for
1541 #pragma omp distribute parallel for simd
1542 #pragma omp distribute simd
1543 #pragma omp for simd
1544 #pragma omp master taskloop
1545 #pragma omp master taskloop simd
1546 #pragma omp parallel for
1547 #pragma omp parallel for simd
1548 #pragma omp parallel loop
1549 #pragma omp parallel master
1550 #pragma omp parallel master taskloop
1551 #pragma omp parallel master taskloop simd
1552 #pragma omp parallel sections
1553 #pragma omp target parallel
1554 #pragma omp target parallel for
1555 #pragma omp target parallel for simd
1556 #pragma omp target parallel loop
1557 #pragma omp target teams
1558 #pragma omp target teams distribute
1559 #pragma omp target teams distribute parallel for
1560 #pragma omp target teams distribute parallel for simd
1561 #pragma omp target teams distribute simd
1562 #pragma omp target teams loop
1563 #pragma omp target simd
1564 #pragma omp taskloop simd
1565 #pragma omp teams distribute
1566 #pragma omp teams distribute parallel for
1567 #pragma omp teams distribute parallel for simd
1568 #pragma omp teams distribute simd
1569 #pragma omp teams loop */
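/* For example (an illustration, not an exhaustive description), given

       #pragma omp target teams distribute parallel for simd map(a) num_teams(4) schedule(static) collapse(2) safelen(8)

   map goes to the target part, num_teams to teams, schedule to for and
   safelen to simd, while collapse is duplicated as described in the
   individual cases below.  */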
1570
1571 void
1572 c_omp_split_clauses (location_t loc, enum tree_code code,
1573 omp_clause_mask mask, tree clauses, tree *cclauses)
1574 {
1575 tree next, c;
1576 enum c_omp_clause_split s;
1577 int i;
1578
1579 for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
1580 cclauses[i] = NULL;
1581 /* Add implicit nowait clause on
1582 #pragma omp parallel {for,for simd,sections}. */
1583 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
1584 switch (code)
1585 {
1586 case OMP_FOR:
1587 case OMP_SIMD:
1588 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
1589 cclauses[C_OMP_CLAUSE_SPLIT_FOR]
1590 = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
1591 break;
1592 case OMP_SECTIONS:
1593 cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
1594 = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
1595 break;
1596 default:
1597 break;
1598 }
1599
1600 for (; clauses ; clauses = next)
1601 {
1602 next = OMP_CLAUSE_CHAIN (clauses);
1603
1604 switch (OMP_CLAUSE_CODE (clauses))
1605 {
1606 /* First the clauses that are unique to some constructs. */
1607 case OMP_CLAUSE_DEVICE:
1608 case OMP_CLAUSE_MAP:
1609 case OMP_CLAUSE_IS_DEVICE_PTR:
1610 case OMP_CLAUSE_DEFAULTMAP:
1611 case OMP_CLAUSE_DEPEND:
1612 s = C_OMP_CLAUSE_SPLIT_TARGET;
1613 break;
1614 case OMP_CLAUSE_NUM_TEAMS:
1615 case OMP_CLAUSE_THREAD_LIMIT:
1616 s = C_OMP_CLAUSE_SPLIT_TEAMS;
1617 break;
1618 case OMP_CLAUSE_DIST_SCHEDULE:
1619 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1620 break;
1621 case OMP_CLAUSE_COPYIN:
1622 case OMP_CLAUSE_NUM_THREADS:
1623 case OMP_CLAUSE_PROC_BIND:
1624 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1625 break;
1626 case OMP_CLAUSE_ORDERED:
1627 s = C_OMP_CLAUSE_SPLIT_FOR;
1628 break;
1629 case OMP_CLAUSE_SCHEDULE:
1630 s = C_OMP_CLAUSE_SPLIT_FOR;
1631 if (code != OMP_SIMD)
1632 OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0;
1633 break;
1634 case OMP_CLAUSE_SAFELEN:
1635 case OMP_CLAUSE_SIMDLEN:
1636 case OMP_CLAUSE_ALIGNED:
1637 case OMP_CLAUSE_NONTEMPORAL:
1638 s = C_OMP_CLAUSE_SPLIT_SIMD;
1639 break;
1640 case OMP_CLAUSE_GRAINSIZE:
1641 case OMP_CLAUSE_NUM_TASKS:
1642 case OMP_CLAUSE_FINAL:
1643 case OMP_CLAUSE_UNTIED:
1644 case OMP_CLAUSE_MERGEABLE:
1645 case OMP_CLAUSE_NOGROUP:
1646 case OMP_CLAUSE_PRIORITY:
1647 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1648 break;
1649 case OMP_CLAUSE_BIND:
1650 s = C_OMP_CLAUSE_SPLIT_LOOP;
1651 break;
1652 /* Duplicate this to all of taskloop, distribute, for, simd and
1653 loop. */
1654 case OMP_CLAUSE_COLLAPSE:
1655 if (code == OMP_SIMD)
1656 {
1657 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
1658 | (OMP_CLAUSE_MASK_1
1659 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
1660 | (OMP_CLAUSE_MASK_1
1661 << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0)
1662 {
1663 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1664 OMP_CLAUSE_COLLAPSE);
1665 OMP_CLAUSE_COLLAPSE_EXPR (c)
1666 = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
1667 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
1668 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
1669 }
1670 else
1671 {
1672 /* This must be #pragma omp target simd */
1673 s = C_OMP_CLAUSE_SPLIT_SIMD;
1674 break;
1675 }
1676 }
1677 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
1678 {
1679 if ((mask & (OMP_CLAUSE_MASK_1
1680 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
1681 {
1682 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1683 OMP_CLAUSE_COLLAPSE);
1684 OMP_CLAUSE_COLLAPSE_EXPR (c)
1685 = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
1686 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
1687 cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
1688 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1689 }
1690 else
1691 s = C_OMP_CLAUSE_SPLIT_FOR;
1692 }
1693 else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
1694 != 0)
1695 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1696 else if (code == OMP_LOOP)
1697 s = C_OMP_CLAUSE_SPLIT_LOOP;
1698 else
1699 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1700 break;
1701 /* Private clause is supported on all constructs but master,
1702 it is enough to put it on the innermost one other than master. For
1703 #pragma omp parallel {for,sections} put it on parallel though,
1704 as that's what we did for OpenMP 3.1. */
1705 case OMP_CLAUSE_PRIVATE:
1706 switch (code)
1707 {
1708 case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
1709 case OMP_FOR: case OMP_SECTIONS:
1710 case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
1711 case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
1712 case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
1713 case OMP_MASTER: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
1714 case OMP_TASKLOOP: s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break;
1715 case OMP_LOOP: s = C_OMP_CLAUSE_SPLIT_LOOP; break;
1716 default: gcc_unreachable ();
1717 }
1718 break;
1719 /* Firstprivate clause is supported on all constructs but
1720 simd, master and loop. Put it on the outermost of those and
1721 duplicate on teams and parallel. */
1722 case OMP_CLAUSE_FIRSTPRIVATE:
1723 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
1724 != 0)
1725 {
1726 if (code == OMP_SIMD
1727 && (mask & ((OMP_CLAUSE_MASK_1
1728 << PRAGMA_OMP_CLAUSE_NUM_THREADS)
1729 | (OMP_CLAUSE_MASK_1
1730 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0)
1731 {
1732 /* This must be #pragma omp target simd. */
1733 s = C_OMP_CLAUSE_SPLIT_TARGET;
1734 break;
1735 }
1736 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1737 OMP_CLAUSE_FIRSTPRIVATE);
1738 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1739 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
1740 cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
1741 }
1742 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1743 != 0)
1744 {
1745 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
1746 | (OMP_CLAUSE_MASK_1
1747 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
1748 {
1749 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1750 OMP_CLAUSE_FIRSTPRIVATE);
1751 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1752 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
1753 cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
1754 if ((mask & (OMP_CLAUSE_MASK_1
1755 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
1756 s = C_OMP_CLAUSE_SPLIT_TEAMS;
1757 else
1758 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1759 }
1760 else if ((mask & (OMP_CLAUSE_MASK_1
1761 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
1762 /* This must be
1763 #pragma omp parallel master taskloop{, simd}. */
1764 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1765 else
1766 /* This must be
1767 #pragma omp parallel{, for{, simd}, sections,loop}
1768 or
1769 #pragma omp target parallel. */
1770 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1771 }
1772 else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
1773 != 0)
1774 {
1775 /* This must be one of
1776 #pragma omp {,target }teams {distribute,loop}
1777 #pragma omp target teams
1778 #pragma omp {,target }teams distribute simd. */
1779 gcc_assert (code == OMP_DISTRIBUTE
1780 || code == OMP_LOOP
1781 || code == OMP_TEAMS
1782 || code == OMP_SIMD);
1783 s = C_OMP_CLAUSE_SPLIT_TEAMS;
1784 }
1785 else if ((mask & (OMP_CLAUSE_MASK_1
1786 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
1787 {
1788 /* This must be #pragma omp distribute simd. */
1789 gcc_assert (code == OMP_SIMD);
1790 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1791 }
1792 else if ((mask & (OMP_CLAUSE_MASK_1
1793 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
1794 {
1795 /* This must be #pragma omp {,{,parallel }master }taskloop simd
1796 or
1797 #pragma omp {,parallel }master taskloop. */
1798 gcc_assert (code == OMP_SIMD || code == OMP_TASKLOOP);
1799 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1800 }
1801 else
1802 {
1803 /* This must be #pragma omp for simd. */
1804 gcc_assert (code == OMP_SIMD);
1805 s = C_OMP_CLAUSE_SPLIT_FOR;
1806 }
1807 break;
1808 /* Lastprivate is allowed on distribute, for, sections, taskloop, loop
1809 and simd. In parallel {for{, simd},sections} we actually want to
1810 put it on parallel rather than for or sections. */
1811 case OMP_CLAUSE_LASTPRIVATE:
1812 if (code == OMP_DISTRIBUTE)
1813 {
1814 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1815 break;
1816 }
1817 if ((mask & (OMP_CLAUSE_MASK_1
1818 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
1819 {
1820 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1821 OMP_CLAUSE_LASTPRIVATE);
1822 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1823 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
1824 OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
1825 = OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
1826 cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c;
1827 }
1828 if (code == OMP_FOR || code == OMP_SECTIONS)
1829 {
1830 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1831 != 0)
1832 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1833 else
1834 s = C_OMP_CLAUSE_SPLIT_FOR;
1835 break;
1836 }
1837 if (code == OMP_TASKLOOP)
1838 {
1839 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1840 break;
1841 }
1842 if (code == OMP_LOOP)
1843 {
1844 s = C_OMP_CLAUSE_SPLIT_LOOP;
1845 break;
1846 }
1847 gcc_assert (code == OMP_SIMD);
1848 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
1849 {
1850 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1851 OMP_CLAUSE_LASTPRIVATE);
1852 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1853 OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
1854 = OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
1855 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1856 != 0)
1857 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1858 else
1859 s = C_OMP_CLAUSE_SPLIT_FOR;
1860 OMP_CLAUSE_CHAIN (c) = cclauses[s];
1861 cclauses[s] = c;
1862 }
1863 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
1864 {
1865 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1866 OMP_CLAUSE_LASTPRIVATE);
1867 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1868 OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
1869 = OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
1870 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
1871 cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
1872 }
1873 s = C_OMP_CLAUSE_SPLIT_SIMD;
1874 break;
1875 /* Shared and default clauses are allowed on parallel, teams and
1876 taskloop. */
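/* For example (an added illustration): on
   "#pragma omp parallel master taskloop default(shared)" the default
   clause is copied onto the parallel split and the original stays on the
   taskloop split.  */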
1877 case OMP_CLAUSE_SHARED:
1878 case OMP_CLAUSE_DEFAULT:
1879 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
1880 != 0)
1881 {
1882 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1883 != 0)
1884 {
1885 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1886 OMP_CLAUSE_CODE (clauses));
1887 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
1888 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1889 else
1890 OMP_CLAUSE_DEFAULT_KIND (c)
1891 = OMP_CLAUSE_DEFAULT_KIND (clauses);
1892 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
1893 cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
1894 }
1895 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1896 break;
1897 }
1898 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
1899 != 0)
1900 {
1901 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1902 == 0)
1903 {
1904 s = C_OMP_CLAUSE_SPLIT_TEAMS;
1905 break;
1906 }
1907 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1908 OMP_CLAUSE_CODE (clauses));
1909 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
1910 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1911 else
1912 OMP_CLAUSE_DEFAULT_KIND (c)
1913 = OMP_CLAUSE_DEFAULT_KIND (clauses);
1914 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
1915 cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
1916 }
1917 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1918 break;
1919 /* order clauses are allowed on for, simd and loop. */
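/* Added illustration: on "#pragma omp for simd order(concurrent)" a copy of
   the clause is chained onto the for split and the original stays on the
   simd split.  */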
1920 case OMP_CLAUSE_ORDER:
1921 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
1922 {
1923 if (code == OMP_SIMD)
1924 {
1925 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1926 OMP_CLAUSE_ORDER);
1927 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
1928 cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
1929 s = C_OMP_CLAUSE_SPLIT_SIMD;
1930 }
1931 else
1932 s = C_OMP_CLAUSE_SPLIT_FOR;
1933 }
1934 else if (code == OMP_LOOP)
1935 s = C_OMP_CLAUSE_SPLIT_LOOP;
1936 else
1937 s = C_OMP_CLAUSE_SPLIT_SIMD;
1938 break;
1939 /* Reduction is allowed on simd, for, parallel, sections, taskloop,
1940 teams and loop. Duplicate it on all of them, but omit on for or
1941 sections if parallel is present (unless inscan, in that case
1942 omit on parallel). If taskloop or loop is combined with
1943 parallel, omit it on parallel. */
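/* E.g. (hedged example, not from the original source): on
   "#pragma omp parallel for simd reduction(+:x)" a copy of the clause is
   chained onto the simd split and the original goes to the parallel split,
   so the for split gets no reduction clause at all.  */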
1944 case OMP_CLAUSE_REDUCTION:
1945 if (OMP_CLAUSE_REDUCTION_TASK (clauses))
1946 {
1947 if (code == OMP_SIMD || code == OMP_LOOP)
1948 {
1949 error_at (OMP_CLAUSE_LOCATION (clauses),
1950 "invalid %<task%> reduction modifier on construct "
1951 "combined with %<simd%> or %<loop%>");
1952 OMP_CLAUSE_REDUCTION_TASK (clauses) = 0;
1953 }
1954 else if (code != OMP_SECTIONS
1955 && (mask & (OMP_CLAUSE_MASK_1
1956 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0
1957 && (mask & (OMP_CLAUSE_MASK_1
1958 << PRAGMA_OMP_CLAUSE_SCHEDULE)) == 0)
1959 {
1960 error_at (OMP_CLAUSE_LOCATION (clauses),
1961 "invalid %<task%> reduction modifier on construct "
1962 "not combined with %<parallel%>, %<for%> or "
1963 "%<sections%>");
1964 OMP_CLAUSE_REDUCTION_TASK (clauses) = 0;
1965 }
1966 }
1967 if (OMP_CLAUSE_REDUCTION_INSCAN (clauses)
1968 && ((mask & ((OMP_CLAUSE_MASK_1
1969 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
1970 | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)))
1971 != 0))
1972 {
1973 error_at (OMP_CLAUSE_LOCATION (clauses),
1974 "%<inscan%> %<reduction%> clause on construct other "
1975 "than %<for%>, %<simd%>, %<for simd%>, "
1976 "%<parallel for%>, %<parallel for simd%>");
1977 OMP_CLAUSE_REDUCTION_INSCAN (clauses) = 0;
1978 }
1979 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
1980 {
1981 if (code == OMP_SIMD)
1982 {
1983 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1984 OMP_CLAUSE_REDUCTION);
1985 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1986 OMP_CLAUSE_REDUCTION_CODE (c)
1987 = OMP_CLAUSE_REDUCTION_CODE (clauses);
1988 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
1989 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
1990 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
1991 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
1992 OMP_CLAUSE_REDUCTION_INSCAN (c)
1993 = OMP_CLAUSE_REDUCTION_INSCAN (clauses);
1994 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
1995 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
1996 }
1997 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
1998 != 0)
1999 {
2000 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
2001 OMP_CLAUSE_REDUCTION);
2002 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
2003 OMP_CLAUSE_REDUCTION_CODE (c)
2004 = OMP_CLAUSE_REDUCTION_CODE (clauses);
2005 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
2006 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
2007 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
2008 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
2009 OMP_CLAUSE_REDUCTION_INSCAN (c)
2010 = OMP_CLAUSE_REDUCTION_INSCAN (clauses);
2011 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
2012 cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
2013 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
2014 }
2015 else if ((mask & (OMP_CLAUSE_MASK_1
2016 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0
2017 && !OMP_CLAUSE_REDUCTION_INSCAN (clauses))
2018 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
2019 else
2020 s = C_OMP_CLAUSE_SPLIT_FOR;
2021 }
2022 else if (code == OMP_SECTIONS
2023 || code == OMP_PARALLEL
2024 || code == OMP_MASTER)
2025 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
2026 else if (code == OMP_TASKLOOP)
2027 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
2028 else if (code == OMP_LOOP)
2029 s = C_OMP_CLAUSE_SPLIT_LOOP;
2030 else if (code == OMP_SIMD)
2031 {
2032 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
2033 != 0)
2034 {
2035 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
2036 OMP_CLAUSE_REDUCTION);
2037 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
2038 OMP_CLAUSE_REDUCTION_CODE (c)
2039 = OMP_CLAUSE_REDUCTION_CODE (clauses);
2040 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
2041 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
2042 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
2043 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
2044 OMP_CLAUSE_REDUCTION_INSCAN (c)
2045 = OMP_CLAUSE_REDUCTION_INSCAN (clauses);
2046 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
2047 cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
2048 }
2049 s = C_OMP_CLAUSE_SPLIT_SIMD;
2050 }
2051 else
2052 s = C_OMP_CLAUSE_SPLIT_TEAMS;
2053 break;
2054 case OMP_CLAUSE_IN_REDUCTION:
2055 /* in_reduction on taskloop simd becomes reduction on the simd
2056 and keeps being in_reduction on taskloop. */
2057 if (code == OMP_SIMD)
2058 {
2059 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
2060 OMP_CLAUSE_REDUCTION);
2061 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
2062 OMP_CLAUSE_REDUCTION_CODE (c)
2063 = OMP_CLAUSE_REDUCTION_CODE (clauses);
2064 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
2065 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
2066 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
2067 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
2068 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
2069 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
2070 }
2071 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
2072 break;
2073 case OMP_CLAUSE_IF:
2074 if (OMP_CLAUSE_IF_MODIFIER (clauses) != ERROR_MARK)
2075 {
2076 s = C_OMP_CLAUSE_SPLIT_COUNT;
2077 switch (OMP_CLAUSE_IF_MODIFIER (clauses))
2078 {
2079 case OMP_PARALLEL:
2080 if ((mask & (OMP_CLAUSE_MASK_1
2081 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
2082 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
2083 break;
2084 case OMP_SIMD:
2085 if (code == OMP_SIMD)
2086 s = C_OMP_CLAUSE_SPLIT_SIMD;
2087 break;
2088 case OMP_TASKLOOP:
2089 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
2090 != 0)
2091 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
2092 break;
2093 case OMP_TARGET:
2094 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
2095 != 0)
2096 s = C_OMP_CLAUSE_SPLIT_TARGET;
2097 break;
2098 default:
2099 break;
2100 }
2101 if (s != C_OMP_CLAUSE_SPLIT_COUNT)
2102 break;
2103 /* Error recovery here: an invalid if-modifier was specified, so add
2104 the clause to just one construct. */
2105 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0)
2106 s = C_OMP_CLAUSE_SPLIT_TARGET;
2107 else if ((mask & (OMP_CLAUSE_MASK_1
2108 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
2109 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
2110 else if ((mask & (OMP_CLAUSE_MASK_1
2111 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
2112 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
2113 else if (code == OMP_SIMD)
2114 s = C_OMP_CLAUSE_SPLIT_SIMD;
2115 else
2116 gcc_unreachable ();
2117 break;
2118 }
2119 /* Otherwise, duplicate if clause to all constructs. */
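/* Illustration (added comment, assuming the usual clause masks): for
   "#pragma omp target parallel for simd if (cond)" copies of the clause are
   chained onto the simd and target splits and the original ends up on the
   parallel split; the for split gets none, as if is not allowed on for.  */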
2120 if (code == OMP_SIMD)
2121 {
2122 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)
2123 | (OMP_CLAUSE_MASK_1
2124 << PRAGMA_OMP_CLAUSE_NUM_THREADS)
2125 | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)))
2126 != 0)
2127 {
2128 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
2129 OMP_CLAUSE_IF);
2130 OMP_CLAUSE_IF_MODIFIER (c)
2131 = OMP_CLAUSE_IF_MODIFIER (clauses);
2132 OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
2133 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
2134 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
2135 }
2136 else
2137 {
2138 s = C_OMP_CLAUSE_SPLIT_SIMD;
2139 break;
2140 }
2141 }
2142 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
2143 != 0)
2144 {
2145 if ((mask & (OMP_CLAUSE_MASK_1
2146 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
2147 {
2148 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
2149 OMP_CLAUSE_IF);
2150 OMP_CLAUSE_IF_MODIFIER (c)
2151 = OMP_CLAUSE_IF_MODIFIER (clauses);
2152 OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
2153 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
2154 cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
2155 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
2156 }
2157 else
2158 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
2159 }
2160 else if ((mask & (OMP_CLAUSE_MASK_1
2161 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
2162 {
2163 if ((mask & (OMP_CLAUSE_MASK_1
2164 << PRAGMA_OMP_CLAUSE_MAP)) != 0)
2165 {
2166 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
2167 OMP_CLAUSE_IF);
2168 OMP_CLAUSE_IF_MODIFIER (c)
2169 = OMP_CLAUSE_IF_MODIFIER (clauses);
2170 OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
2171 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
2172 cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
2173 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
2174 }
2175 else
2176 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
2177 }
2178 else
2179 s = C_OMP_CLAUSE_SPLIT_TARGET;
2180 break;
2181 case OMP_CLAUSE_LINEAR:
2182 /* Linear clause is allowed on simd and for. Put it on the
2183 innermost construct. */
2184 if (code == OMP_SIMD)
2185 s = C_OMP_CLAUSE_SPLIT_SIMD;
2186 else
2187 s = C_OMP_CLAUSE_SPLIT_FOR;
2188 break;
2189 case OMP_CLAUSE_NOWAIT:
2190 /* Nowait clause is allowed on target, for and sections, but
2191 is not allowed on parallel for or parallel sections. Therefore,
2192 put it on the target construct if present, because target can only
2193 be combined with parallel for{, simd} and not with plain for{, simd};
2194 otherwise put it on the worksharing construct. */
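/* For instance (added, hedged): "#pragma omp target parallel for nowait"
   puts the clause on the target split, whereas "#pragma omp for simd nowait"
   puts it on the for split.  */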
2195 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
2196 != 0)
2197 s = C_OMP_CLAUSE_SPLIT_TARGET;
2198 else
2199 s = C_OMP_CLAUSE_SPLIT_FOR;
2200 break;
2201 default:
2202 gcc_unreachable ();
2203 }
2204 OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
2205 cclauses[s] = clauses;
2206 }
2207
2208 if (!flag_checking)
2209 return;
2210
2211 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
2212 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE);
2213 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
2214 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE);
2215 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
2216 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE);
2217 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
2218 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
2219 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
2220 | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0
2221 && code != OMP_SECTIONS
2222 && code != OMP_LOOP)
2223 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE);
2224 if (code != OMP_SIMD)
2225 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE);
2226 }
2227
2228
2229 /* qsort callback to compare #pragma omp declare simd clauses. */
2230
2231 static int
2232 c_omp_declare_simd_clause_cmp (const void *p, const void *q)
2233 {
2234 tree a = *(const tree *) p;
2235 tree b = *(const tree *) q;
2236 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
2237 {
2238 if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
2239 return -1;
2240 return 1;
2241 }
2242 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
2243 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
2244 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
2245 {
2246 int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
2247 int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
2248 if (c < d)
2249 return 1;
2250 if (c > d)
2251 return -1;
2252 }
2253 return 0;
2254 }
2255
2256 /* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
2257 CLAUSES into argument indexes within PARMS and sort them. */
2258
2259 tree
2260 c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
2261 {
2262 tree c;
2263 vec<tree> clvec = vNULL;
2264
2265 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2266 {
2267 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
2268 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
2269 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
2270 {
2271 tree decl = OMP_CLAUSE_DECL (c);
2272 tree arg;
2273 int idx;
2274 for (arg = parms, idx = 0; arg;
2275 arg = TREE_CHAIN (arg), idx++)
2276 if (arg == decl)
2277 break;
2278 if (arg == NULL_TREE)
2279 {
2280 error_at (OMP_CLAUSE_LOCATION (c),
2281 "%qD is not a function argument", decl);
2282 continue;
2283 }
2284 OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
2285 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2286 && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
2287 {
2288 decl = OMP_CLAUSE_LINEAR_STEP (c);
2289 for (arg = parms, idx = 0; arg;
2290 arg = TREE_CHAIN (arg), idx++)
2291 if (arg == decl)
2292 break;
2293 if (arg == NULL_TREE)
2294 {
2295 error_at (OMP_CLAUSE_LOCATION (c),
2296 "%qD is not a function argument", decl);
2297 continue;
2298 }
2299 OMP_CLAUSE_LINEAR_STEP (c)
2300 = build_int_cst (integer_type_node, idx);
2301 }
2302 }
2303 clvec.safe_push (c);
2304 }
2305 if (!clvec.is_empty ())
2306 {
2307 unsigned int len = clvec.length (), i;
2308 clvec.qsort (c_omp_declare_simd_clause_cmp);
2309 clauses = clvec[0];
2310 for (i = 0; i < len; i++)
2311 OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
2312 }
2313 else
2314 clauses = NULL_TREE;
2315 clvec.release ();
2316 return clauses;
2317 }
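/* A rough usage illustration for the function above (added comment; the
   declaration and pragma below are hypothetical): given
   "void foo (int *p, int n)" with "#pragma omp declare simd linear(p)
   uniform(n)", the linear clause's OMP_CLAUSE_DECL becomes 0 and the uniform
   clause's becomes 1, and the resulting list is ordered by
   c_omp_declare_simd_clause_cmp.  */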
2318
2319 /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. */
2320
2321 void
2322 c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
2323 {
2324 tree c;
2325
2326 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2327 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
2328 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
2329 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
2330 {
2331 int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i;
2332 tree arg;
2333 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
2334 arg = TREE_CHAIN (arg), i++)
2335 if (i == idx)
2336 break;
2337 gcc_assert (arg);
2338 OMP_CLAUSE_DECL (c) = arg;
2339 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2340 && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
2341 {
2342 idx = tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c));
2343 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
2344 arg = TREE_CHAIN (arg), i++)
2345 if (i == idx)
2346 break;
2347 gcc_assert (arg);
2348 OMP_CLAUSE_LINEAR_STEP (c) = arg;
2349 }
2350 }
2351 }
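/* In the reverse direction (illustrative note): the indexes stored by
   c_omp_declare_simd_clauses_to_numbers are mapped back here, so index 0
   becomes the first PARM_DECL in DECL_ARGUMENTS (fndecl), index 1 the
   second, and so on; the same applies to a linear step that names a
   parameter.  */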
2352
2353 /* Return true for __func__ and similar function-local predefined
2354 variables (which are predetermined shared in OpenMP and allowed in
2355 shared/firstprivate clauses). */
2356
2357 bool
2358 c_omp_predefined_variable (tree decl)
2359 {
2360 if (VAR_P (decl)
2361 && DECL_ARTIFICIAL (decl)
2362 && TREE_READONLY (decl)
2363 && TREE_STATIC (decl)
2364 && DECL_NAME (decl)
2365 && (DECL_NAME (decl) == ridpointers[RID_C99_FUNCTION_NAME]
2366 || DECL_NAME (decl) == ridpointers[RID_FUNCTION_NAME]
2367 || DECL_NAME (decl) == ridpointers[RID_PRETTY_FUNCTION_NAME]))
2368 return true;
2369 return false;
2370 }
2371
2372 /* OMP_CLAUSE_DEFAULT_UNSPECIFIED unless OpenMP sharing attribute of DECL
2373 is predetermined. */
2374
2375 enum omp_clause_default_kind
2376 c_omp_predetermined_sharing (tree decl)
2377 {
2378 /* Predetermine artificial variables holding integral values; those
2379 are usually the result of gimplify_one_sizepos or SAVE_EXPR
2380 gimplification. */
2381 if (VAR_P (decl)
2382 && DECL_ARTIFICIAL (decl)
2383 && INTEGRAL_TYPE_P (TREE_TYPE (decl)))
2384 return OMP_CLAUSE_DEFAULT_SHARED;
2385
2386 if (c_omp_predefined_variable (decl))
2387 return OMP_CLAUSE_DEFAULT_SHARED;
2388
2389 return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
2390 }
2391
2392 /* OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED unless OpenMP mapping attribute
2393 of DECL is predetermined. */
2394
2395 enum omp_clause_defaultmap_kind
2396 c_omp_predetermined_mapping (tree decl)
2397 {
2398 /* Predetermine artificial variables holding integral values; those
2399 are usually the result of gimplify_one_sizepos or SAVE_EXPR
2400 gimplification. */
2401 if (VAR_P (decl)
2402 && DECL_ARTIFICIAL (decl)
2403 && INTEGRAL_TYPE_P (TREE_TYPE (decl)))
2404 return OMP_CLAUSE_DEFAULTMAP_FIRSTPRIVATE;
2405
2406 if (c_omp_predefined_variable (decl))
2407 return OMP_CLAUSE_DEFAULTMAP_TO;
2408
2409 return OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED;
2410 }
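/* Illustrative note (added): an artificial integral temporary created for,
   e.g., a variable-length array bound is thus implicitly firstprivate on a
   target region, while __func__ and friends are implicitly mapped "to".  */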
2411
2412
2413 /* Diagnose errors in an OpenMP context selector; return CTX if
2414 it is correct, or error_mark_node otherwise. */
2415
2416 tree
2417 c_omp_check_context_selector (location_t loc, tree ctx)
2418 {
2419 /* Each trait-set-selector-name can only be specified once.
2420 There are just 4 set names. */
2421 for (tree t1 = ctx; t1; t1 = TREE_CHAIN (t1))
2422 for (tree t2 = TREE_CHAIN (t1); t2; t2 = TREE_CHAIN (t2))
2423 if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
2424 {
2425 error_at (loc, "selector set %qs specified more than once",
2426 IDENTIFIER_POINTER (TREE_PURPOSE (t1)));
2427 return error_mark_node;
2428 }
2429 for (tree t = ctx; t; t = TREE_CHAIN (t))
2430 {
2431 /* Each trait-selector-name can only be specified once. */
2432 if (list_length (TREE_VALUE (t)) < 5)
2433 {
2434 for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
2435 for (tree t2 = TREE_CHAIN (t1); t2; t2 = TREE_CHAIN (t2))
2436 if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
2437 {
2438 error_at (loc,
2439 "selector %qs specified more than once in set %qs",
2440 IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
2441 IDENTIFIER_POINTER (TREE_PURPOSE (t)));
2442 return error_mark_node;
2443 }
2444 }
2445 else
2446 {
2447 hash_set<tree> pset;
2448 for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
2449 if (pset.add (TREE_PURPOSE (t1)))
2450 {
2451 error_at (loc,
2452 "selector %qs specified more than once in set %qs",
2453 IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
2454 IDENTIFIER_POINTER (TREE_PURPOSE (t)));
2455 return error_mark_node;
2456 }
2457 }
2458
2459 static const char *const kind[] = {
2460 "host", "nohost", "cpu", "gpu", "fpga", "any", NULL };
2461 static const char *const vendor[] = {
2462 "amd", "arm", "bsc", "cray", "fujitsu", "gnu", "ibm", "intel",
2463 "llvm", "nvidia", "pgi", "ti", "unknown", NULL };
2464 static const char *const extension[] = { NULL };
2465 static const char *const atomic_default_mem_order[] = {
2466 "seq_cst", "relaxed", "acq_rel", NULL };
2467 struct known_properties { const char *set; const char *selector;
2468 const char *const *props; };
2469 known_properties props[] = {
2470 { "device", "kind", kind },
2471 { "implementation", "vendor", vendor },
2472 { "implementation", "extension", extension },
2473 { "implementation", "atomic_default_mem_order",
2474 atomic_default_mem_order } };
2475 for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
2476 for (unsigned i = 0; i < ARRAY_SIZE (props); i++)
2477 if (!strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
2478 props[i].selector)
2479 && !strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t)),
2480 props[i].set))
2481 for (tree t2 = TREE_VALUE (t1); t2; t2 = TREE_CHAIN (t2))
2482 for (unsigned j = 0; ; j++)
2483 {
2484 if (props[i].props[j] == NULL)
2485 {
2486 if (TREE_PURPOSE (t2)
2487 && !strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
2488 " score"))
2489 break;
2490 if (props[i].props == atomic_default_mem_order)
2491 {
2492 error_at (loc,
2493 "incorrect property %qs of %qs selector",
2494 IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
2495 "atomic_default_mem_order");
2496 return error_mark_node;
2497 }
2498 else if (TREE_PURPOSE (t2))
2499 warning_at (loc, 0,
2500 "unknown property %qs of %qs selector",
2501 IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
2502 props[i].selector);
2503 else
2504 warning_at (loc, 0,
2505 "unknown property %qE of %qs selector",
2506 TREE_VALUE (t2), props[i].selector);
2507 break;
2508 }
2509 else if (TREE_PURPOSE (t2) == NULL_TREE)
2510 {
2511 const char *str = TREE_STRING_POINTER (TREE_VALUE (t2));
2512 if (!strcmp (str, props[i].props[j])
2513 && ((size_t) TREE_STRING_LENGTH (TREE_VALUE (t2))
2514 == strlen (str) + 1))
2515 break;
2516 }
2517 else if (!strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
2518 props[i].props[j]))
2519 break;
2520 }
2521 }
2522 return ctx;
2523 }
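/* A couple of hedged examples of what the checking above diagnoses:
   "match (device={kind(gpu)}, device={kind(host)})" is rejected because the
   device selector set appears twice; an unrecognized vendor such as
   implementation={vendor(acme)} only produces a warning, whereas an unknown
   atomic_default_mem_order property is a hard error.  */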
2524
2525 /* Register VARIANT as a variant of some base function marked with
2526 #pragma omp declare variant. CONSTRUCT is the corresponding construct
2527 selector set. */
2528
2529 void
2530 c_omp_mark_declare_variant (location_t loc, tree variant, tree construct)
2531 {
2532 tree attr = lookup_attribute ("omp declare variant variant",
2533 DECL_ATTRIBUTES (variant));
2534 if (attr == NULL_TREE)
2535 {
2536 attr = tree_cons (get_identifier ("omp declare variant variant"),
2537 unshare_expr (construct),
2538 DECL_ATTRIBUTES (variant));
2539 DECL_ATTRIBUTES (variant) = attr;
2540 return;
2541 }
2542 if ((TREE_VALUE (attr) != NULL_TREE) != (construct != NULL_TREE)
2543 || (construct != NULL_TREE
2544 && omp_context_selector_set_compare ("construct", TREE_VALUE (attr),
2545 construct)))
2546 error_at (loc, "%qD used as a variant with incompatible %<construct%> "
2547 "selector sets", variant);
2548 }
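/* For example (added, hedged): if the same function is named as the variant
   in two "#pragma omp declare variant" directives whose construct selector
   sets differ, the second call here finds the attribute already present with
   a non-matching selector set and emits the "incompatible ... selector sets"
   diagnostic above.  */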
2549
2550 /* For OpenACC, the OMP_CLAUSE_MAP_KIND of an OMP_CLAUSE_MAP is used internally
2551 to distinguish clauses as seen by the user. Return the "friendly" clause
2552 name for error messages etc., where possible. See also
2553 c/c-parser.c:c_parser_oacc_data_clause and
2554 cp/parser.c:cp_parser_oacc_data_clause. */
2555
2556 const char *
2557 c_omp_map_clause_name (tree clause, bool oacc)
2558 {
2559 if (oacc && OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_MAP)
2560 switch (OMP_CLAUSE_MAP_KIND (clause))
2561 {
2562 case GOMP_MAP_FORCE_ALLOC:
2563 case GOMP_MAP_ALLOC: return "create";
2564 case GOMP_MAP_FORCE_TO:
2565 case GOMP_MAP_TO: return "copyin";
2566 case GOMP_MAP_FORCE_FROM:
2567 case GOMP_MAP_FROM: return "copyout";
2568 case GOMP_MAP_FORCE_TOFROM:
2569 case GOMP_MAP_TOFROM: return "copy";
2570 case GOMP_MAP_RELEASE: return "delete";
2571 case GOMP_MAP_FORCE_PRESENT: return "present";
2572 case GOMP_MAP_ATTACH: return "attach";
2573 case GOMP_MAP_FORCE_DETACH:
2574 case GOMP_MAP_DETACH: return "detach";
2575 case GOMP_MAP_DEVICE_RESIDENT: return "device_resident";
2576 case GOMP_MAP_LINK: return "link";
2577 case GOMP_MAP_FORCE_DEVICEPTR: return "deviceptr";
2578 default: break;
2579 }
2580 return omp_clause_code_name[OMP_CLAUSE_CODE (clause)];
2581 }
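/* Illustrative note (added): an OpenACC "copyin (x)" clause is represented
   internally as an OMP_CLAUSE_MAP with a GOMP_MAP_TO or GOMP_MAP_FORCE_TO
   kind, so diagnostics looked up through this function say "copyin" rather
   than "map".  */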