Daily bump.
[gcc.git] / gcc / c-family / c-omp.c
/* This file contains routines to construct OpenACC and OpenMP constructs,
   called from parsing in the C and C++ front ends.

   Copyright (C) 2005-2021 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>,
		  Diego Novillo <dnovillo@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "options.h"
28 #include "c-common.h"
29 #include "gimple-expr.h"
30 #include "c-pragma.h"
31 #include "stringpool.h"
32 #include "omp-general.h"
33 #include "gomp-constants.h"
34 #include "memmodel.h"
35 #include "attribs.h"
36 #include "gimplify.h"
37 #include "langhooks.h"
38 #include "bitmap.h"
39
40
41 /* Complete a #pragma oacc wait construct. LOC is the location of
42 the #pragma. */
43
44 tree
45 c_finish_oacc_wait (location_t loc, tree parms, tree clauses)
46 {
47 const int nparms = list_length (parms);
48 tree stmt, t;
49 vec<tree, va_gc> *args;
50
51 vec_alloc (args, nparms + 2);
52 stmt = builtin_decl_explicit (BUILT_IN_GOACC_WAIT);
53
54 if (omp_find_clause (clauses, OMP_CLAUSE_ASYNC))
55 t = OMP_CLAUSE_ASYNC_EXPR (clauses);
56 else
57 t = build_int_cst (integer_type_node, GOMP_ASYNC_SYNC);
58
59 args->quick_push (t);
60 args->quick_push (build_int_cst (integer_type_node, nparms));
61
62 for (t = parms; t; t = TREE_CHAIN (t))
63 {
64 if (TREE_CODE (OMP_CLAUSE_WAIT_EXPR (t)) == INTEGER_CST)
65 args->quick_push (build_int_cst (integer_type_node,
66 TREE_INT_CST_LOW (OMP_CLAUSE_WAIT_EXPR (t))));
67 else
68 args->quick_push (OMP_CLAUSE_WAIT_EXPR (t));
69 }
70
71 stmt = build_call_expr_loc_vec (loc, stmt, args);
72
73 vec_free (args);
74
75 return stmt;
76 }
77
78 /* Complete a #pragma omp master construct. STMT is the structured-block
79 that follows the pragma. LOC is the location of the #pragma. */
80
81 tree
82 c_finish_omp_master (location_t loc, tree stmt)
83 {
84 tree t = add_stmt (build1 (OMP_MASTER, void_type_node, stmt));
85 SET_EXPR_LOCATION (t, loc);
86 return t;
87 }
88
89 /* Complete a #pragma omp taskgroup construct. BODY is the structured-block
90 that follows the pragma. LOC is the location of the #pragma. */
91
92 tree
93 c_finish_omp_taskgroup (location_t loc, tree body, tree clauses)
94 {
95 tree stmt = make_node (OMP_TASKGROUP);
96 TREE_TYPE (stmt) = void_type_node;
97 OMP_TASKGROUP_BODY (stmt) = body;
98 OMP_TASKGROUP_CLAUSES (stmt) = clauses;
99 SET_EXPR_LOCATION (stmt, loc);
100 return add_stmt (stmt);
101 }
102
103 /* Complete a #pragma omp critical construct. BODY is the structured-block
104 that follows the pragma, NAME is the identifier in the pragma, or null
105 if it was omitted. LOC is the location of the #pragma. */
106
107 tree
108 c_finish_omp_critical (location_t loc, tree body, tree name, tree clauses)
109 {
110 gcc_assert (!clauses || OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_HINT);
111 if (name == NULL_TREE
112 && clauses != NULL_TREE
113 && integer_nonzerop (OMP_CLAUSE_HINT_EXPR (clauses)))
114 {
115 error_at (OMP_CLAUSE_LOCATION (clauses),
116 "%<#pragma omp critical%> with %<hint%> clause requires "
117 "a name, except when %<omp_sync_hint_none%> is used");
118 return error_mark_node;
119 }
120
121 tree stmt = make_node (OMP_CRITICAL);
122 TREE_TYPE (stmt) = void_type_node;
123 OMP_CRITICAL_BODY (stmt) = body;
124 OMP_CRITICAL_NAME (stmt) = name;
125 OMP_CRITICAL_CLAUSES (stmt) = clauses;
126 SET_EXPR_LOCATION (stmt, loc);
127 return add_stmt (stmt);
128 }
129
130 /* Complete a #pragma omp ordered construct. STMT is the structured-block
131 that follows the pragma. LOC is the location of the #pragma. */
132
133 tree
134 c_finish_omp_ordered (location_t loc, tree clauses, tree stmt)
135 {
136 tree t = make_node (OMP_ORDERED);
137 TREE_TYPE (t) = void_type_node;
138 OMP_ORDERED_BODY (t) = stmt;
139 if (!flag_openmp /* flag_openmp_simd */
140 && (OMP_CLAUSE_CODE (clauses) != OMP_CLAUSE_SIMD
141 || OMP_CLAUSE_CHAIN (clauses)))
142 clauses = build_omp_clause (loc, OMP_CLAUSE_SIMD);
143 OMP_ORDERED_CLAUSES (t) = clauses;
144 SET_EXPR_LOCATION (t, loc);
145 return add_stmt (t);
146 }
147
148
149 /* Complete a #pragma omp barrier construct. LOC is the location of
150 the #pragma. */
151
152 void
153 c_finish_omp_barrier (location_t loc)
154 {
155 tree x;
156
157 x = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER);
158 x = build_call_expr_loc (loc, x, 0);
159 add_stmt (x);
160 }
161
162
163 /* Complete a #pragma omp taskwait construct. LOC is the location of the
164 pragma. */
165
166 void
167 c_finish_omp_taskwait (location_t loc)
168 {
169 tree x;
170
171 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT);
172 x = build_call_expr_loc (loc, x, 0);
173 add_stmt (x);
174 }
175
176
177 /* Complete a #pragma omp taskyield construct. LOC is the location of the
178 pragma. */
179
180 void
181 c_finish_omp_taskyield (location_t loc)
182 {
183 tree x;
184
185 x = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD);
186 x = build_call_expr_loc (loc, x, 0);
187 add_stmt (x);
188 }
189
190
/* Complete a #pragma omp atomic construct.  For CODE OMP_ATOMIC
   the expression to be implemented atomically is LHS opcode= RHS. 
   For OMP_ATOMIC_READ V = LHS, for OMP_ATOMIC_CAPTURE_{NEW,OLD} LHS
   opcode= RHS with the new or old content of LHS returned.
   LOC is the location of the atomic statement.  The value returned
   is either error_mark_node (if the construct was erroneous) or an
   OMP_ATOMIC* node which should be added to the current statement
   tree with add_stmt.  If TEST is set, avoid calling save_expr
   or create_tmp_var*.  */

tree
c_finish_omp_atomic (location_t loc, enum tree_code code,
		     enum tree_code opcode, tree lhs, tree rhs,
		     tree v, tree lhs1, tree rhs1, bool swapped,
		     enum omp_memory_order memory_order, bool test)
{
  tree x, type, addr, pre = NULL_TREE;
  HOST_WIDE_INT bitpos = 0, bitsize = 0;

  /* Bail out early on any erroneous operand.  */
  if (lhs == error_mark_node || rhs == error_mark_node
      || v == error_mark_node || lhs1 == error_mark_node
      || rhs1 == error_mark_node)
    return error_mark_node;

  /* ??? According to one reading of the OpenMP spec, complex type are
     supported, but there are no atomic stores for any architecture.
     But at least icc 9.0 doesn't support complex types here either.
     And lets not even talk about vector types...  */
  type = TREE_TYPE (lhs);
  if (!INTEGRAL_TYPE_P (type)
      && !POINTER_TYPE_P (type)
      && !SCALAR_FLOAT_TYPE_P (type))
    {
      error_at (loc, "invalid expression type for %<#pragma omp atomic%>");
      return error_mark_node;
    }
  if (TYPE_ATOMIC (type))
    {
      error_at (loc, "%<_Atomic%> expression in %<#pragma omp atomic%>");
      return error_mark_node;
    }

  if (opcode == RDIV_EXPR)
    opcode = TRUNC_DIV_EXPR;

  /* ??? Validate that rhs does not overlap lhs.  */
  tree blhs = NULL;
  /* For a bit-field LHS, operate on the whole representative field and
     remember the bit position/size of the member within it; BLHS keeps
     the original bit-field COMPONENT_REF.  */
  if (TREE_CODE (lhs) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs, 1)))
    {
      tree field = TREE_OPERAND (lhs, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))
	  && tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)))
	bitpos = (tree_to_uhwi (DECL_FIELD_OFFSET (field))
		  - tree_to_uhwi (DECL_FIELD_OFFSET (repr))) * BITS_PER_UNIT;
      else
	bitpos = 0;
      bitpos += (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
		 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
      gcc_assert (tree_fits_shwi_p (DECL_SIZE (field)));
      bitsize = tree_to_shwi (DECL_SIZE (field));
      blhs = lhs;
      type = TREE_TYPE (repr);
      lhs = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs, 0),
		    repr, TREE_OPERAND (lhs, 2));
    }

  /* Take and save the address of the lhs.  From then on we'll reference it
     via indirection.  */
  addr = build_unary_op (loc, ADDR_EXPR, lhs, false);
  if (addr == error_mark_node)
    return error_mark_node;
  if (!test)
    addr = save_expr (addr);
  if (!test
      && TREE_CODE (addr) != SAVE_EXPR
      && (TREE_CODE (addr) != ADDR_EXPR
	  || !VAR_P (TREE_OPERAND (addr, 0))))
    {
      /* Make sure LHS is simple enough so that goa_lhs_expr_p can recognize
	 it even after unsharing function body.  */
      tree var = create_tmp_var_raw (TREE_TYPE (addr));
      DECL_CONTEXT (var) = current_function_decl;
      addr = build4 (TARGET_EXPR, TREE_TYPE (addr), var, addr, NULL, NULL);
    }
  tree orig_lhs = lhs;
  lhs = build_indirect_ref (loc, addr, RO_NULL);
  tree new_lhs = lhs;

  if (code == OMP_ATOMIC_READ)
    {
      x = build1 (OMP_ATOMIC_READ, type, addr);
      SET_EXPR_LOCATION (x, loc);
      OMP_ATOMIC_MEMORY_ORDER (x) = memory_order;
      /* For a bit-field, extract the member from the loaded
	 representative value.  */
      if (blhs)
	x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
			bitsize_int (bitsize), bitsize_int (bitpos));
      return build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
				loc, x, NULL_TREE);
    }

  /* There are lots of warnings, errors, and conversions that need to happen
     in the course of interpreting a statement.  Use the normal mechanisms
     to do this, and then take it apart again.  */
  if (blhs)
    {
      /* Fold the binary operation into the RHS up front so the modify
	 below is a plain store of the representative.  */
      lhs = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), lhs,
			bitsize_int (bitsize), bitsize_int (bitpos));
      if (swapped)
	rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      else if (opcode != NOP_EXPR)
	rhs = build_binary_op (loc, opcode, lhs, rhs, true);
      opcode = NOP_EXPR;
    }
  else if (swapped)
    {
      rhs = build_binary_op (loc, opcode, rhs, lhs, true);
      opcode = NOP_EXPR;
    }
  bool save = in_late_binary_op;
  in_late_binary_op = true;
  x = build_modify_expr (loc, blhs ? blhs : lhs, NULL_TREE, opcode,
			 loc, rhs, NULL_TREE);
  in_late_binary_op = save;
  if (x == error_mark_node)
    return error_mark_node;
  if (TREE_CODE (x) == COMPOUND_EXPR)
    {
      /* build_modify_expr may preevaluate part of the RHS; keep that
	 in PRE and re-attach it at the end.  */
      pre = TREE_OPERAND (x, 0);
      gcc_assert (TREE_CODE (pre) == SAVE_EXPR || tree_invariant_p (pre));
      x = TREE_OPERAND (x, 1);
    }
  gcc_assert (TREE_CODE (x) == MODIFY_EXPR);
  rhs = TREE_OPERAND (x, 1);

  /* For a bit-field store, insert the new member value into the loaded
     representative.  */
  if (blhs)
    rhs = build3_loc (loc, BIT_INSERT_EXPR, type, new_lhs,
		      rhs, bitsize_int (bitpos));

  /* Punt the actual generation of atomic operations to common code.  */
  if (code == OMP_ATOMIC)
    type = void_type_node;
  x = build2 (code, type, addr, rhs);
  SET_EXPR_LOCATION (x, loc);
  OMP_ATOMIC_MEMORY_ORDER (x) = memory_order;

  /* Generally it is hard to prove lhs1 and lhs are the same memory
     location, just diagnose different variables.  */
  if (rhs1
      && VAR_P (rhs1)
      && VAR_P (orig_lhs)
      && rhs1 != orig_lhs
      && !test)
    {
      if (code == OMP_ATOMIC)
	error_at (loc, "%<#pragma omp atomic update%> uses two different "
		       "variables for memory");
      else
	error_at (loc, "%<#pragma omp atomic capture%> uses two different "
		       "variables for memory");
      return error_mark_node;
    }

  /* Rewrite bit-field references in LHS1/RHS1 to their representative
     fields, mirroring what was done for LHS above, so the comparisons
     below work on the same form.  */
  if (lhs1
      && lhs1 != orig_lhs
      && TREE_CODE (lhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (lhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (lhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (lhs1, 1)))
    {
      tree field = TREE_OPERAND (lhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      lhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (lhs1, 0),
		     repr, TREE_OPERAND (lhs1, 2));
    }
  if (rhs1
      && rhs1 != orig_lhs
      && TREE_CODE (rhs1) == COMPONENT_REF
      && TREE_CODE (TREE_OPERAND (rhs1, 1)) == FIELD_DECL
      && DECL_C_BIT_FIELD (TREE_OPERAND (rhs1, 1))
      && DECL_BIT_FIELD_REPRESENTATIVE (TREE_OPERAND (rhs1, 1)))
    {
      tree field = TREE_OPERAND (rhs1, 1);
      tree repr = DECL_BIT_FIELD_REPRESENTATIVE (field);
      rhs1 = build3 (COMPONENT_REF, TREE_TYPE (repr), TREE_OPERAND (rhs1, 0),
		     repr, TREE_OPERAND (rhs1, 2));
    }

  if (code != OMP_ATOMIC)
    {
      /* Generally it is hard to prove lhs1 and lhs are the same memory
	 location, just diagnose different variables.  */
      if (lhs1 && VAR_P (lhs1) && VAR_P (orig_lhs))
	{
	  if (lhs1 != orig_lhs && !test)
	    {
	      error_at (loc, "%<#pragma omp atomic capture%> uses two "
			     "different variables for memory");
	      return error_mark_node;
	    }
	}
      if (blhs)
	{
	  x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
			  bitsize_int (bitsize), bitsize_int (bitpos));
	  type = TREE_TYPE (blhs);
	}
      /* Capture the atomic result into V.  */
      x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
			     loc, x, NULL_TREE);
      if (rhs1 && rhs1 != orig_lhs)
	{
	  tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
	  if (rhs1addr == error_mark_node)
	    return error_mark_node;
	  x = omit_one_operand_loc (loc, type, x, rhs1addr);
	}
      if (lhs1 && lhs1 != orig_lhs)
	{
	  tree lhs1addr = build_unary_op (loc, ADDR_EXPR, lhs1, false);
	  if (lhs1addr == error_mark_node)
	    return error_mark_node;
	  if (code == OMP_ATOMIC_CAPTURE_OLD)
	    x = omit_one_operand_loc (loc, type, x, lhs1addr);
	  else
	    {
	      if (!test)
		x = save_expr (x);
	      x = omit_two_operands_loc (loc, type, x, x, lhs1addr);
	    }
	}
    }
  else if (rhs1 && rhs1 != orig_lhs)
    {
      tree rhs1addr = build_unary_op (loc, ADDR_EXPR, rhs1, false);
      if (rhs1addr == error_mark_node)
	return error_mark_node;
      x = omit_one_operand_loc (loc, type, x, rhs1addr);
    }

  /* Re-attach any preevaluated side effects saved above.  */
  if (pre)
    x = omit_one_operand_loc (loc, type, x, pre);
  return x;
}
437
438
439 /* Return true if TYPE is the implementation's omp_depend_t. */
440
441 bool
442 c_omp_depend_t_p (tree type)
443 {
444 type = TYPE_MAIN_VARIANT (type);
445 return (TREE_CODE (type) == RECORD_TYPE
446 && TYPE_NAME (type)
447 && ((TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
448 ? DECL_NAME (TYPE_NAME (type)) : TYPE_NAME (type))
449 == get_identifier ("omp_depend_t"))
450 && (!TYPE_CONTEXT (type)
451 || TREE_CODE (TYPE_CONTEXT (type)) == TRANSLATION_UNIT_DECL)
452 && COMPLETE_TYPE_P (type)
453 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
454 && !compare_tree_int (TYPE_SIZE (type),
455 2 * tree_to_uhwi (TYPE_SIZE (ptr_type_node))));
456 }
457
458
/* Complete a #pragma omp depobj construct.  LOC is the location of the
   #pragma.  DEPOBJ is the depobj lvalue expression, KIND the dependence
   kind (for the update/destroy forms) and CLAUSE the depend clause for
   the depend form, or NULL_TREE/error_mark_node.  Emits statements that
   store the dependence address and kind into the omp_depend_t object.  */

void
c_finish_omp_depobj (location_t loc, tree depobj,
		     enum omp_clause_depend_kind kind, tree clause)
{
  tree t = NULL_TREE;
  /* Diagnose an invalid depobj operand: it must be a non-const
     omp_depend_t lvalue.  */
  if (!error_operand_p (depobj))
    {
      if (!c_omp_depend_t_p (TREE_TYPE (depobj)))
	{
	  error_at (EXPR_LOC_OR_LOC (depobj, loc),
		    "type of %<depobj%> expression is not %<omp_depend_t%>");
	  depobj = error_mark_node;
	}
      else if (TYPE_READONLY (TREE_TYPE (depobj)))
	{
	  error_at (EXPR_LOC_OR_LOC (depobj, loc),
		    "%<const%> qualified %<depobj%> expression");
	  depobj = error_mark_node;
	}
    }
  else
    depobj = error_mark_node;

  if (clause == error_mark_node)
    return;

  if (clause)
    {
      gcc_assert (TREE_CODE (clause) == OMP_CLAUSE
		  && OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_DEPEND);
      if (OMP_CLAUSE_CHAIN (clause))
	error_at (OMP_CLAUSE_LOCATION (clause),
		  "more than one locator in %<depend%> clause on %<depobj%> "
		  "construct");
      switch (OMP_CLAUSE_DEPEND_KIND (clause))
	{
	case OMP_CLAUSE_DEPEND_DEPOBJ:
	  error_at (OMP_CLAUSE_LOCATION (clause),
		    "%<depobj%> dependence type specified in %<depend%> "
		    "clause on %<depobj%> construct");
	  return;
	case OMP_CLAUSE_DEPEND_SOURCE:
	case OMP_CLAUSE_DEPEND_SINK:
	  error_at (OMP_CLAUSE_LOCATION (clause),
		    "%<depend(%s)%> is only allowed in %<omp ordered%>",
		    OMP_CLAUSE_DEPEND_KIND (clause) == OMP_CLAUSE_DEPEND_SOURCE
		    ? "source" : "sink");
	  return;
	case OMP_CLAUSE_DEPEND_IN:
	case OMP_CLAUSE_DEPEND_OUT:
	case OMP_CLAUSE_DEPEND_INOUT:
	case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
	  kind = OMP_CLAUSE_DEPEND_KIND (clause);
	  t = OMP_CLAUSE_DECL (clause);
	  gcc_assert (t);
	  /* An iterator modifier is represented as a TREE_LIST with a
	     TREE_VEC TREE_PURPOSE; it is not allowed on depobj.  */
	  if (TREE_CODE (t) == TREE_LIST
	      && TREE_PURPOSE (t)
	      && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC)
	    {
	      error_at (OMP_CLAUSE_LOCATION (clause),
			"%<iterator%> modifier may not be specified on "
			"%<depobj%> construct");
	      return;
	    }
	  /* Take the address of the locator, keeping any side effects
	     from a COMPOUND_EXPR's first operand.  */
	  if (TREE_CODE (t) == COMPOUND_EXPR)
	    {
	      tree t1 = build_fold_addr_expr (TREE_OPERAND (t, 1));
	      t = build2 (COMPOUND_EXPR, TREE_TYPE (t1), TREE_OPERAND (t, 0),
			  t1);
	    }
	  else
	    t = build_fold_addr_expr (t);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  else
    gcc_assert (kind != OMP_CLAUSE_DEPEND_SOURCE);

  if (depobj == error_mark_node)
    return;

  /* Treat the depobj object as two pointer-sized slots: the first holds
     the dependence address (for the depend form), the second the kind.  */
  depobj = build_fold_addr_expr_loc (EXPR_LOC_OR_LOC (depobj, loc), depobj);
  tree dtype
    = build_pointer_type_for_mode (ptr_type_node, TYPE_MODE (ptr_type_node),
				   true);
  depobj = fold_convert (dtype, depobj);
  tree r;
  if (clause)
    {
      depobj = save_expr (depobj);
      r = build_indirect_ref (loc, depobj, RO_UNARY_STAR);
      add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t));
    }
  /* Map the dependence kind to the GOMP_DEPEND_* value stored in the
     second slot; OMP_CLAUSE_DEPEND_LAST maps to -1 (presumably the
     destroy form — confirm against callers).  */
  int k;
  switch (kind)
    {
    case OMP_CLAUSE_DEPEND_IN:
      k = GOMP_DEPEND_IN;
      break;
    case OMP_CLAUSE_DEPEND_OUT:
      k = GOMP_DEPEND_OUT;
      break;
    case OMP_CLAUSE_DEPEND_INOUT:
      k = GOMP_DEPEND_INOUT;
      break;
    case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
      k = GOMP_DEPEND_MUTEXINOUTSET;
      break;
    case OMP_CLAUSE_DEPEND_LAST:
      k = -1;
      break;
    default:
      gcc_unreachable ();
    }
  t = build_int_cst (ptr_type_node, k);
  depobj = build2_loc (loc, POINTER_PLUS_EXPR, TREE_TYPE (depobj), depobj,
		       TYPE_SIZE_UNIT (ptr_type_node));
  r = build_indirect_ref (loc, depobj, RO_UNARY_STAR);
  add_stmt (build2 (MODIFY_EXPR, void_type_node, r, t));
}
584
585
586 /* Complete a #pragma omp flush construct. We don't do anything with
587 the variable list that the syntax allows. LOC is the location of
588 the #pragma. */
589
590 void
591 c_finish_omp_flush (location_t loc, int mo)
592 {
593 tree x;
594
595 if (mo == MEMMODEL_LAST)
596 {
597 x = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE);
598 x = build_call_expr_loc (loc, x, 0);
599 }
600 else
601 {
602 x = builtin_decl_explicit (BUILT_IN_ATOMIC_THREAD_FENCE);
603 x = build_call_expr_loc (loc, x, 1,
604 build_int_cst (integer_type_node, mo));
605 }
606 add_stmt (x);
607 }
608
609
/* Check and canonicalize OMP_FOR increment expression.
   Helper function for c_finish_omp_for.  EXP is the increment value
   and DECL the iteration variable; returns EXP rewritten with DECL
   replaced by zero (i.e. the loop-invariant step), or error_mark_node
   if EXP cannot be decomposed that way.  */

static tree
check_omp_for_incr_expr (location_t loc, tree exp, tree decl)
{
  tree t;

  /* Only integral expressions of at least DECL's precision can be
     decomposed safely.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (exp))
      || TYPE_PRECISION (TREE_TYPE (exp)) < TYPE_PRECISION (TREE_TYPE (decl)))
    return error_mark_node;

  if (exp == decl)
    return build_int_cst (TREE_TYPE (exp), 0);

  switch (TREE_CODE (exp))
    {
    CASE_CONVERT:
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_convert_loc (loc, TREE_TYPE (exp), t);
      break;
    case MINUS_EXPR:
      /* DECL may only appear in the minuend of a subtraction.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, MINUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      break;
    case PLUS_EXPR:
      /* DECL may appear on either side of an addition.  */
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 0), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), t, TREE_OPERAND (exp, 1));
      t = check_omp_for_incr_expr (loc, TREE_OPERAND (exp, 1), decl);
      if (t != error_mark_node)
	return fold_build2_loc (loc, PLUS_EXPR,
				TREE_TYPE (exp), TREE_OPERAND (exp, 0), t);
      break;
    case COMPOUND_EXPR:
      {
	/* cp_build_modify_expr forces preevaluation of the RHS to make
	   sure that it is evaluated before the lvalue-rvalue conversion
	   is applied to the LHS.  Reconstruct the original expression.  */
	tree op0 = TREE_OPERAND (exp, 0);
	if (TREE_CODE (op0) == TARGET_EXPR
	    && !VOID_TYPE_P (TREE_TYPE (op0)))
	  {
	    tree op1 = TREE_OPERAND (exp, 1);
	    tree temp = TARGET_EXPR_SLOT (op0);
	    if (BINARY_CLASS_P (op1)
		&& TREE_OPERAND (op1, 1) == temp)
	      {
		/* Substitute the preevaluated value back into the
		   binary expression and retry.  */
		op1 = copy_node (op1);
		TREE_OPERAND (op1, 1) = TARGET_EXPR_INITIAL (op0);
		return check_omp_for_incr_expr (loc, op1, decl);
	      }
	  }
	break;
      }
    default:
      break;
    }

  return error_mark_node;
}
675
676 /* If the OMP_FOR increment expression in INCR is of pointer type,
677 canonicalize it into an expression handled by gimplify_omp_for()
678 and return it. DECL is the iteration variable. */
679
680 static tree
681 c_omp_for_incr_canonicalize_ptr (location_t loc, tree decl, tree incr)
682 {
683 if (POINTER_TYPE_P (TREE_TYPE (decl))
684 && TREE_OPERAND (incr, 1))
685 {
686 tree t = fold_convert_loc (loc,
687 sizetype, TREE_OPERAND (incr, 1));
688
689 if (TREE_CODE (incr) == POSTDECREMENT_EXPR
690 || TREE_CODE (incr) == PREDECREMENT_EXPR)
691 t = fold_build1_loc (loc, NEGATE_EXPR, sizetype, t);
692 t = fold_build_pointer_plus (decl, t);
693 incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
694 }
695 return incr;
696 }
697
/* Validate and generate OMP_FOR.
   DECLV is a vector of iteration variables, for each collapsed loop.

   ORIG_DECLV, if non-NULL, is a vector with the original iteration
   variables (prior to any transformations, by say, C++ iterators).

   INITV, CONDV and INCRV are vectors containing initialization
   expressions, controlling predicates and increment expressions.
   BODY is the body of the loop and PRE_BODY statements that go before
   the loop.  FINAL_P appears to indicate the expressions are in their
   final (folded) form, enabling stricter checking of non-constant
   steps with %<!=%> conditions — confirm against callers.  Returns
   the OMP_FOR tree, or NULL after diagnosing an error.  */

tree
c_finish_omp_for (location_t locus, enum tree_code code, tree declv,
		  tree orig_declv, tree initv, tree condv, tree incrv,
		  tree body, tree pre_body, bool final_p)
{
  location_t elocus;
  bool fail = false;
  int i;

  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv));
  gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv));
  for (i = 0; i < TREE_VEC_LENGTH (declv); i++)
    {
      tree decl = TREE_VEC_ELT (declv, i);
      tree init = TREE_VEC_ELT (initv, i);
      tree cond = TREE_VEC_ELT (condv, i);
      tree incr = TREE_VEC_ELT (incrv, i);

      elocus = locus;
      if (EXPR_HAS_LOCATION (init))
	elocus = EXPR_LOCATION (init);

      /* Validate the iteration variable: integral or pointer type.  */
      if (!INTEGRAL_TYPE_P (TREE_TYPE (decl))
	  && TREE_CODE (TREE_TYPE (decl)) != POINTER_TYPE)
	{
	  error_at (elocus, "invalid type for iteration variable %qE", decl);
	  fail = true;
	}
      else if (TYPE_ATOMIC (TREE_TYPE (decl)))
	{
	  error_at (elocus, "%<_Atomic%> iteration variable %qE", decl);
	  fail = true;
	  /* _Atomic iterator confuses stuff too much, so we risk ICE
	     trying to diagnose it further.  */
	  continue;
	}

      /* In the case of "for (int i = 0...)", init will be a decl.  It should
	 have a DECL_INITIAL that we can turn into an assignment.  */
      if (init == decl)
	{
	  elocus = DECL_SOURCE_LOCATION (decl);

	  init = DECL_INITIAL (decl);
	  if (init == NULL)
	    {
	      error_at (elocus, "%qE is not initialized", decl);
	      init = integer_zero_node;
	      fail = true;
	    }
	  DECL_INITIAL (decl) = NULL_TREE;

	  init = build_modify_expr (elocus, decl, NULL_TREE, NOP_EXPR,
				    /* FIXME diagnostics: This should
				       be the location of the INIT.  */
				    elocus,
				    init,
				    NULL_TREE);
	}
      if (init != error_mark_node)
	{
	  gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
	  gcc_assert (TREE_OPERAND (init, 0) == decl);
	}

      if (cond == NULL_TREE)
	{
	  error_at (elocus, "missing controlling predicate");
	  fail = true;
	}
      else
	{
	  bool cond_ok = false;

	  /* E.g. C sizeof (vla) could add COMPOUND_EXPRs with
	     evaluation of the vla VAR_DECL.  We need to readd
	     them to the non-decl operand.  See PR45784.  */
	  while (TREE_CODE (cond) == COMPOUND_EXPR)
	    cond = TREE_OPERAND (cond, 1);

	  if (EXPR_HAS_LOCATION (cond))
	    elocus = EXPR_LOCATION (cond);

	  if (TREE_CODE (cond) == LT_EXPR
	      || TREE_CODE (cond) == LE_EXPR
	      || TREE_CODE (cond) == GT_EXPR
	      || TREE_CODE (cond) == GE_EXPR
	      || TREE_CODE (cond) == NE_EXPR
	      || TREE_CODE (cond) == EQ_EXPR)
	    {
	      tree op0 = TREE_OPERAND (cond, 0);
	      tree op1 = TREE_OPERAND (cond, 1);

	      /* 2.5.1.  The comparison in the condition is computed in
		 the type of DECL, otherwise the behavior is undefined.

		 For example:
		 long n; int i;
		 i < n;

		 according to ISO will be evaluated as:
		 (long)i < n;

		 We want to force:
		 i < (int)n;  */
	      if (TREE_CODE (op0) == NOP_EXPR
		  && decl == TREE_OPERAND (op0, 0))
		{
		  TREE_OPERAND (cond, 0) = TREE_OPERAND (op0, 0);
		  TREE_OPERAND (cond, 1)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 1));
		}
	      else if (TREE_CODE (op1) == NOP_EXPR
		       && decl == TREE_OPERAND (op1, 0))
		{
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (op1, 0);
		  TREE_OPERAND (cond, 0)
		    = fold_build1_loc (elocus, NOP_EXPR, TREE_TYPE (decl),
				       TREE_OPERAND (cond, 0));
		}

	      /* Canonicalize the comparison so DECL is always the first
		 operand, swapping the comparison code if needed.  */
	      if (decl == TREE_OPERAND (cond, 0))
		cond_ok = true;
	      else if (decl == TREE_OPERAND (cond, 1))
		{
		  TREE_SET_CODE (cond,
				 swap_tree_comparison (TREE_CODE (cond)));
		  TREE_OPERAND (cond, 1) = TREE_OPERAND (cond, 0);
		  TREE_OPERAND (cond, 0) = decl;
		  cond_ok = true;
		}

	      /* != / == against TYPE_MIN/MAX_VALUE can be rewritten as an
		 ordered comparison; otherwise == (and != for OpenACC) is
		 not an acceptable controlling predicate.  */
	      if (TREE_CODE (cond) == NE_EXPR
		  || TREE_CODE (cond) == EQ_EXPR)
		{
		  if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)))
		    {
		      if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR)
			cond_ok = false;
		    }
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MIN_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? GT_EXPR : LE_EXPR);
		  else if (operand_equal_p (TREE_OPERAND (cond, 1),
					    TYPE_MAX_VALUE (TREE_TYPE (decl)),
					    0))
		    TREE_SET_CODE (cond, TREE_CODE (cond) == NE_EXPR
					 ? LT_EXPR : GE_EXPR);
		  else if (code == OACC_LOOP || TREE_CODE (cond) == EQ_EXPR)
		    cond_ok = false;
		}

	      /* Re-attach any COMPOUND_EXPRs stripped above to the
		 non-decl operand of the canonicalized condition.  */
	      if (cond_ok && TREE_VEC_ELT (condv, i) != cond)
		{
		  tree ce = NULL_TREE, *pce = &ce;
		  tree type = TREE_TYPE (TREE_OPERAND (cond, 1));
		  for (tree c = TREE_VEC_ELT (condv, i); c != cond;
		       c = TREE_OPERAND (c, 1))
		    {
		      *pce = build2 (COMPOUND_EXPR, type, TREE_OPERAND (c, 0),
				     TREE_OPERAND (cond, 1));
		      pce = &TREE_OPERAND (*pce, 1);
		    }
		  TREE_OPERAND (cond, 1) = ce;
		  TREE_VEC_ELT (condv, i) = cond;
		}
	    }

	  if (!cond_ok)
	    {
	      error_at (elocus, "invalid controlling predicate");
	      fail = true;
	    }
	}

      if (incr == NULL_TREE)
	{
	  error_at (elocus, "missing increment expression");
	  fail = true;
	}
      else
	{
	  bool incr_ok = false;

	  if (EXPR_HAS_LOCATION (incr))
	    elocus = EXPR_LOCATION (incr);

	  /* Check all the valid increment expressions: v++, v--, ++v, --v,
	     v = v + incr, v = incr + v and v = v - incr.  */
	  switch (TREE_CODE (incr))
	    {
	    case POSTINCREMENT_EXPR:
	    case PREINCREMENT_EXPR:
	    case POSTDECREMENT_EXPR:
	    case PREDECREMENT_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;

	      incr_ok = true;
	      if (!fail
		  && TREE_CODE (cond) == NE_EXPR
		  && TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE
		  && TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)))
		  && (TREE_CODE (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl))))
		      != INTEGER_CST))
		{
		  /* For pointer to VLA, transform != into < or >
		     depending on whether incr is increment or decrement.  */
		  if (TREE_CODE (incr) == PREINCREMENT_EXPR
		      || TREE_CODE (incr) == POSTINCREMENT_EXPR)
		    TREE_SET_CODE (cond, LT_EXPR);
		  else
		    TREE_SET_CODE (cond, GT_EXPR);
		}
	      incr = c_omp_for_incr_canonicalize_ptr (elocus, decl, incr);
	      break;

	    case COMPOUND_EXPR:
	      if (TREE_CODE (TREE_OPERAND (incr, 0)) != SAVE_EXPR
		  || TREE_CODE (TREE_OPERAND (incr, 1)) != MODIFY_EXPR)
		break;
	      incr = TREE_OPERAND (incr, 1);
	      /* FALLTHRU */
	    case MODIFY_EXPR:
	      if (TREE_OPERAND (incr, 0) != decl)
		break;
	      if (TREE_OPERAND (incr, 1) == decl)
		break;
	      if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
		  && (TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl
		      || TREE_OPERAND (TREE_OPERAND (incr, 1), 1) == decl))
		incr_ok = true;
	      else if ((TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR
			|| (TREE_CODE (TREE_OPERAND (incr, 1))
			    == POINTER_PLUS_EXPR))
		       && TREE_OPERAND (TREE_OPERAND (incr, 1), 0) == decl)
		incr_ok = true;
	      else
		{
		  /* Try to decompose a more complex RHS into
		     DECL + invariant step.  */
		  tree t = check_omp_for_incr_expr (elocus,
						    TREE_OPERAND (incr, 1),
						    decl);
		  if (t != error_mark_node)
		    {
		      incr_ok = true;
		      t = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, t);
		      incr = build2 (MODIFY_EXPR, void_type_node, decl, t);
		    }
		}
	      if (!fail
		  && incr_ok
		  && TREE_CODE (cond) == NE_EXPR)
		{
		  /* A %<!=%> condition requires a step of exactly 1 or -1
		     (for pointers: exactly one element).  */
		  tree i = TREE_OPERAND (incr, 1);
		  i = TREE_OPERAND (i, TREE_OPERAND (i, 0) == decl);
		  i = c_fully_fold (i, false, NULL);
		  if (!final_p
		      && TREE_CODE (i) != INTEGER_CST)
		    ;
		  else if (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE)
		    {
		      tree unit
			= TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (decl)));
		      if (unit)
			{
			  enum tree_code ccode = GT_EXPR;
			  unit = c_fully_fold (unit, false, NULL);
			  i = fold_convert (TREE_TYPE (unit), i);
			  if (operand_equal_p (unit, i, 0))
			    ccode = LT_EXPR;
			  if (ccode == GT_EXPR)
			    {
			      i = fold_unary (NEGATE_EXPR, TREE_TYPE (i), i);
			      if (i == NULL_TREE
				  || !operand_equal_p (unit, i, 0))
				{
				  error_at (elocus,
					    "increment is not constant 1 or "
					    "-1 for %<!=%> condition");
				  fail = true;
				}
			    }
			  if (TREE_CODE (unit) != INTEGER_CST)
			    /* For pointer to VLA, transform != into < or >
			       depending on whether the pointer is
			       incremented or decremented in each
			       iteration.  */
			    TREE_SET_CODE (cond, ccode);
			}
		    }
		  else
		    {
		      if (!integer_onep (i) && !integer_minus_onep (i))
			{
			  error_at (elocus,
				    "increment is not constant 1 or -1 for"
				    " %<!=%> condition");
			  fail = true;
			}
		    }
		}
	      break;

	    default:
	      break;
	    }
	  if (!incr_ok)
	    {
	      error_at (elocus, "invalid increment expression");
	      fail = true;
	    }
	}

      /* Store back the possibly canonicalized expressions.  */
      TREE_VEC_ELT (initv, i) = init;
      TREE_VEC_ELT (incrv, i) = incr;
    }

  if (fail)
    return NULL;
  else
    {
      tree t = make_node (code);

      TREE_TYPE (t) = void_type_node;
      OMP_FOR_INIT (t) = initv;
      OMP_FOR_COND (t) = condv;
      OMP_FOR_INCR (t) = incrv;
      OMP_FOR_BODY (t) = body;
      OMP_FOR_PRE_BODY (t) = pre_body;
      OMP_FOR_ORIG_DECLS (t) = orig_declv;

      SET_EXPR_LOCATION (t, locus);
      return t;
    }
}
1049
/* Type for passing data in between c_omp_check_loop_iv and
   c_omp_check_loop_iv_r.  */

struct c_omp_check_loop_iv_data
{
  tree declv;			/* TREE_VEC of the loop iteration variables.  */
  bool fail;			/* Set when an error has been diagnosed.  */
  bool maybe_nonrect;		/* Set when a reference to an outer IV may
				   indicate a non-rectangular loop nest.  */
  location_t stmt_loc;		/* Location of the looping construct.  */
  location_t expr_loc;		/* Location of the expression being walked;
				   falls back to stmt_loc if unknown.  */
  int kind;			/* Low 2 bits select the diagnostic: 0 init,
				   1 condition, 2 increment.  Bit 2 set means
				   references to earlier IVs mark the nest as
				   maybe_nonrect instead of erroring (see
				   c_omp_check_loop_iv_r).  */
  int idx;			/* Index of the current loop in the nest.  */
  walk_tree_lh lh;		/* Language-specific walk_tree helper.  */
  hash_set<tree> *ppset;	/* Visited-node set used to avoid duplicate
				   diagnostics and re-walking shared trees.  */
};
1065
1066 /* Return -1 if DECL is not a loop iterator in loop nest D, otherwise
1067 return the index of the loop in which it is an iterator.
1068 Return TREE_VEC_LENGTH (d->declv) if it is a C++ range for iterator. */
1069
1070 static int
1071 c_omp_is_loop_iterator (tree decl, struct c_omp_check_loop_iv_data *d)
1072 {
1073 for (int i = 0; i < TREE_VEC_LENGTH (d->declv); i++)
1074 if (decl == TREE_VEC_ELT (d->declv, i)
1075 || (TREE_CODE (TREE_VEC_ELT (d->declv, i)) == TREE_LIST
1076 && decl == TREE_PURPOSE (TREE_VEC_ELT (d->declv, i))))
1077 return i;
1078 else if (TREE_CODE (TREE_VEC_ELT (d->declv, i)) == TREE_LIST
1079 && TREE_CHAIN (TREE_VEC_ELT (d->declv, i))
1080 && (TREE_CODE (TREE_CHAIN (TREE_VEC_ELT (d->declv, i)))
1081 == TREE_VEC)
1082 && decl == TREE_VEC_ELT (TREE_CHAIN (TREE_VEC_ELT (d->declv,
1083 i)), 2))
1084 return TREE_VEC_LENGTH (d->declv);
1085 return -1;
1086 }
1087
1088 /* Helper function called via walk_tree, to diagnose uses
1089 of associated loop IVs inside of lb, b and incr expressions
1090 of OpenMP loops. */
1091
1092 static tree
1093 c_omp_check_loop_iv_r (tree *tp, int *walk_subtrees, void *data)
1094 {
1095 struct c_omp_check_loop_iv_data *d
1096 = (struct c_omp_check_loop_iv_data *) data;
1097 if (DECL_P (*tp))
1098 {
1099 int idx = c_omp_is_loop_iterator (*tp, d);
1100 if (idx == -1)
1101 return NULL_TREE;
1102
1103 if ((d->kind & 4) && idx < d->idx)
1104 {
1105 d->maybe_nonrect = true;
1106 return NULL_TREE;
1107 }
1108
1109 if (d->ppset->add (*tp))
1110 return NULL_TREE;
1111
1112 location_t loc = d->expr_loc;
1113 if (loc == UNKNOWN_LOCATION)
1114 loc = d->stmt_loc;
1115
1116 switch (d->kind & 3)
1117 {
1118 case 0:
1119 error_at (loc, "initializer expression refers to "
1120 "iteration variable %qD", *tp);
1121 break;
1122 case 1:
1123 error_at (loc, "condition expression refers to "
1124 "iteration variable %qD", *tp);
1125 break;
1126 case 2:
1127 error_at (loc, "increment expression refers to "
1128 "iteration variable %qD", *tp);
1129 break;
1130 }
1131 d->fail = true;
1132 }
1133 else if (d->ppset->add (*tp))
1134 *walk_subtrees = 0;
1135 /* Don't walk dtors added by C++ wrap_cleanups_r. */
1136 else if (TREE_CODE (*tp) == TRY_CATCH_EXPR
1137 && TRY_CATCH_IS_CLEANUP (*tp))
1138 {
1139 *walk_subtrees = 0;
1140 return walk_tree_1 (&TREE_OPERAND (*tp, 0), c_omp_check_loop_iv_r, data,
1141 NULL, d->lh);
1142 }
1143
1144 return NULL_TREE;
1145 }
1146
1147 /* Check the allowed expressions for non-rectangular loop nest lb and b
1148 expressions. Return the outer var decl referenced in the expression. */
1149
1150 static tree
1151 c_omp_check_nonrect_loop_iv (tree *tp, struct c_omp_check_loop_iv_data *d,
1152 walk_tree_lh lh)
1153 {
1154 d->maybe_nonrect = false;
1155 if (d->fail)
1156 return NULL_TREE;
1157
1158 hash_set<tree> pset;
1159 hash_set<tree> *ppset = d->ppset;
1160 d->ppset = &pset;
1161
1162 tree t = *tp;
1163 if (TREE_CODE (t) == TREE_VEC
1164 && TREE_VEC_LENGTH (t) == 3
1165 && DECL_P (TREE_VEC_ELT (t, 0))
1166 && c_omp_is_loop_iterator (TREE_VEC_ELT (t, 0), d) >= 0)
1167 {
1168 d->kind &= 3;
1169 walk_tree_1 (&TREE_VEC_ELT (t, 1), c_omp_check_loop_iv_r, d, NULL, lh);
1170 walk_tree_1 (&TREE_VEC_ELT (t, 1), c_omp_check_loop_iv_r, d, NULL, lh);
1171 d->ppset = ppset;
1172 return d->fail ? NULL_TREE : TREE_VEC_ELT (t, 0);
1173 }
1174
1175 while (CONVERT_EXPR_P (t))
1176 t = TREE_OPERAND (t, 0);
1177
1178 tree a1 = t, a2 = integer_zero_node;
1179 bool neg_a1 = false, neg_a2 = false;
1180 switch (TREE_CODE (t))
1181 {
1182 case PLUS_EXPR:
1183 case MINUS_EXPR:
1184 a1 = TREE_OPERAND (t, 0);
1185 a2 = TREE_OPERAND (t, 1);
1186 while (CONVERT_EXPR_P (a1))
1187 a1 = TREE_OPERAND (a1, 0);
1188 while (CONVERT_EXPR_P (a2))
1189 a2 = TREE_OPERAND (a2, 0);
1190 if (DECL_P (a1) && c_omp_is_loop_iterator (a1, d) >= 0)
1191 {
1192 a2 = TREE_OPERAND (t, 1);
1193 if (TREE_CODE (t) == MINUS_EXPR)
1194 neg_a2 = true;
1195 t = a1;
1196 break;
1197 }
1198 if (DECL_P (a2) && c_omp_is_loop_iterator (a2, d) >= 0)
1199 {
1200 a1 = TREE_OPERAND (t, 0);
1201 if (TREE_CODE (t) == MINUS_EXPR)
1202 neg_a1 = true;
1203 t = a2;
1204 a2 = a1;
1205 break;
1206 }
1207 if (TREE_CODE (a1) == MULT_EXPR && TREE_CODE (a2) == MULT_EXPR)
1208 {
1209 tree o1 = TREE_OPERAND (a1, 0);
1210 tree o2 = TREE_OPERAND (a1, 1);
1211 while (CONVERT_EXPR_P (o1))
1212 o1 = TREE_OPERAND (o1, 0);
1213 while (CONVERT_EXPR_P (o2))
1214 o2 = TREE_OPERAND (o2, 0);
1215 if ((DECL_P (o1) && c_omp_is_loop_iterator (o1, d) >= 0)
1216 || (DECL_P (o2) && c_omp_is_loop_iterator (o2, d) >= 0))
1217 {
1218 a2 = TREE_OPERAND (t, 1);
1219 if (TREE_CODE (t) == MINUS_EXPR)
1220 neg_a2 = true;
1221 t = a1;
1222 break;
1223 }
1224 }
1225 if (TREE_CODE (a2) == MULT_EXPR)
1226 {
1227 a1 = TREE_OPERAND (t, 0);
1228 if (TREE_CODE (t) == MINUS_EXPR)
1229 neg_a1 = true;
1230 t = a2;
1231 a2 = a1;
1232 break;
1233 }
1234 if (TREE_CODE (a1) == MULT_EXPR)
1235 {
1236 a2 = TREE_OPERAND (t, 1);
1237 if (TREE_CODE (t) == MINUS_EXPR)
1238 neg_a2 = true;
1239 t = a1;
1240 break;
1241 }
1242 a2 = integer_zero_node;
1243 break;
1244 default:
1245 break;
1246 }
1247
1248 a1 = integer_one_node;
1249 if (TREE_CODE (t) == MULT_EXPR)
1250 {
1251 tree o1 = TREE_OPERAND (t, 0);
1252 tree o2 = TREE_OPERAND (t, 1);
1253 while (CONVERT_EXPR_P (o1))
1254 o1 = TREE_OPERAND (o1, 0);
1255 while (CONVERT_EXPR_P (o2))
1256 o2 = TREE_OPERAND (o2, 0);
1257 if (DECL_P (o1) && c_omp_is_loop_iterator (o1, d) >= 0)
1258 {
1259 a1 = TREE_OPERAND (t, 1);
1260 t = o1;
1261 }
1262 else if (DECL_P (o2) && c_omp_is_loop_iterator (o2, d) >= 0)
1263 {
1264 a1 = TREE_OPERAND (t, 0);
1265 t = o2;
1266 }
1267 }
1268
1269 d->kind &= 3;
1270 tree ret = NULL_TREE;
1271 if (DECL_P (t) && c_omp_is_loop_iterator (t, d) >= 0)
1272 {
1273 location_t loc = d->expr_loc;
1274 if (loc == UNKNOWN_LOCATION)
1275 loc = d->stmt_loc;
1276 if (!lang_hooks.types_compatible_p (TREE_TYPE (*tp), TREE_TYPE (t)))
1277 {
1278 if (d->kind == 0)
1279 error_at (loc, "outer iteration variable %qD used in initializer"
1280 " expression has type other than %qT",
1281 t, TREE_TYPE (*tp));
1282 else
1283 error_at (loc, "outer iteration variable %qD used in condition"
1284 " expression has type other than %qT",
1285 t, TREE_TYPE (*tp));
1286 d->fail = true;
1287 }
1288 else if (!INTEGRAL_TYPE_P (TREE_TYPE (a1)))
1289 {
1290 error_at (loc, "outer iteration variable %qD multiplier expression"
1291 " %qE is not integral", t, a1);
1292 d->fail = true;
1293 }
1294 else if (!INTEGRAL_TYPE_P (TREE_TYPE (a2)))
1295 {
1296 error_at (loc, "outer iteration variable %qD addend expression"
1297 " %qE is not integral", t, a2);
1298 d->fail = true;
1299 }
1300 else
1301 {
1302 walk_tree_1 (&a1, c_omp_check_loop_iv_r, d, NULL, lh);
1303 walk_tree_1 (&a2, c_omp_check_loop_iv_r, d, NULL, lh);
1304 }
1305 if (!d->fail)
1306 {
1307 a1 = fold_convert (TREE_TYPE (*tp), a1);
1308 a2 = fold_convert (TREE_TYPE (*tp), a2);
1309 if (neg_a1)
1310 a1 = fold_build1 (NEGATE_EXPR, TREE_TYPE (a1), a1);
1311 if (neg_a2)
1312 a2 = fold_build1 (NEGATE_EXPR, TREE_TYPE (a2), a2);
1313 ret = t;
1314 *tp = make_tree_vec (3);
1315 TREE_VEC_ELT (*tp, 0) = t;
1316 TREE_VEC_ELT (*tp, 1) = a1;
1317 TREE_VEC_ELT (*tp, 2) = a2;
1318 }
1319 }
1320 else
1321 walk_tree_1 (&t, c_omp_check_loop_iv_r, d, NULL, lh);
1322
1323 d->ppset = ppset;
1324 return ret;
1325 }
1326
/* Diagnose invalid references to loop iterators in lb, b and incr
   expressions.  STMT is the OMP_FOR (or OACC_LOOP etc.) statement,
   DECLV the TREE_VEC of iteration variables, LH an optional language
   specific walk_tree helper.  Returns true if no diagnostic has been
   emitted.  */

bool
c_omp_check_loop_iv (tree stmt, tree declv, walk_tree_lh lh)
{
  hash_set<tree> pset;
  struct c_omp_check_loop_iv_data data;
  int i;

  data.declv = declv;
  data.fail = false;
  data.maybe_nonrect = false;
  data.stmt_loc = EXPR_LOCATION (stmt);
  data.lh = lh;
  data.ppset = &pset;
  for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (stmt)); i++)
    {
      tree init = TREE_VEC_ELT (OMP_FOR_INIT (stmt), i);
      gcc_assert (TREE_CODE (init) == MODIFY_EXPR);
      tree decl = TREE_OPERAND (init, 0);
      tree cond = TREE_VEC_ELT (OMP_FOR_COND (stmt), i);
      gcc_assert (COMPARISON_CLASS_P (cond));
      gcc_assert (TREE_OPERAND (cond, 0) == decl);
      tree incr = TREE_VEC_ELT (OMP_FOR_INCR (stmt), i);
      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (init, 1));
      tree vec_outer1 = NULL_TREE, vec_outer2 = NULL_TREE;
      int kind = 0;
      /* Reusing an outer loop's iteration variable as the iterator of
	 an inner associated loop is invalid.  */
      if (i > 0
	  && (unsigned) c_omp_is_loop_iterator (decl, &data) < (unsigned) i)
	{
	  location_t loc = data.expr_loc;
	  if (loc == UNKNOWN_LOCATION)
	    loc = data.stmt_loc;
	  error_at (loc, "the same loop iteration variables %qD used in "
			 "multiple associated loops", decl);
	  data.fail = true;
	}
      /* Handle non-rectangular loop nests.  Bit 2 of data.kind tells
	 c_omp_check_loop_iv_r to flag references to outer iterators in
	 data.maybe_nonrect instead of diagnosing them immediately.  */
      if (TREE_CODE (stmt) != OACC_LOOP
	  && (TREE_CODE (TREE_OPERAND (init, 1)) == TREE_VEC
	      || INTEGRAL_TYPE_P (TREE_TYPE (TREE_OPERAND (init, 1))))
	  && i > 0)
	kind = 4;
      data.kind = kind;
      data.idx = i;
      /* Check the lb expression.  */
      walk_tree_1 (&TREE_OPERAND (init, 1),
		   c_omp_check_loop_iv_r, &data, NULL, lh);
      if (data.maybe_nonrect)
	vec_outer1 = c_omp_check_nonrect_loop_iv (&TREE_OPERAND (init, 1),
						  &data, lh);
      /* Don't warn for C++ random access iterators here, the
	 expression then involves the subtraction and always refers
	 to the original value.  The C++ FE needs to warn on those
	 earlier.  */
      if (decl == TREE_VEC_ELT (declv, i)
	  || (TREE_CODE (TREE_VEC_ELT (declv, i)) == TREE_LIST
	      && decl == TREE_PURPOSE (TREE_VEC_ELT (declv, i))))
	{
	  /* Check the b (condition) expression.  */
	  data.expr_loc = EXPR_LOCATION (cond);
	  data.kind = kind | 1;
	  walk_tree_1 (&TREE_OPERAND (cond, 1),
		       c_omp_check_loop_iv_r, &data, NULL, lh);
	  if (data.maybe_nonrect)
	    vec_outer2 = c_omp_check_nonrect_loop_iv (&TREE_OPERAND (cond, 1),
						      &data, lh);
	}
      /* lb and b may only refer to a single outer iteration
	 variable.  */
      if (vec_outer1 && vec_outer2 && vec_outer1 != vec_outer2)
	{
	  location_t loc = data.expr_loc;
	  if (loc == UNKNOWN_LOCATION)
	    loc = data.stmt_loc;
	  error_at (loc, "two different outer iteration variables %qD and %qD"
			 " used in a single loop", vec_outer1, vec_outer2);
	  data.fail = true;
	}
      if (vec_outer1 || vec_outer2)
	OMP_FOR_NON_RECTANGULAR (stmt) = 1;
      if (TREE_CODE (incr) == MODIFY_EXPR)
	{
	  gcc_assert (TREE_OPERAND (incr, 0) == decl);
	  incr = TREE_OPERAND (incr, 1);
	  data.kind = 2;
	  /* Walk whichever operand of the increment is not DECL
	     itself.  */
	  if (TREE_CODE (incr) == PLUS_EXPR
	      && TREE_OPERAND (incr, 1) == decl)
	    {
	      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 0));
	      walk_tree_1 (&TREE_OPERAND (incr, 0),
			   c_omp_check_loop_iv_r, &data, NULL, lh);
	    }
	  else
	    {
	      data.expr_loc = EXPR_LOCATION (TREE_OPERAND (incr, 1));
	      walk_tree_1 (&TREE_OPERAND (incr, 1),
			   c_omp_check_loop_iv_r, &data, NULL, lh);
	    }
	}
    }
  return !data.fail;
}
1427
1428 /* Similar, but allows to check the init or cond expressions individually. */
1429
1430 bool
1431 c_omp_check_loop_iv_exprs (location_t stmt_loc, tree declv, int i, tree decl,
1432 tree init, tree cond, walk_tree_lh lh)
1433 {
1434 hash_set<tree> pset;
1435 struct c_omp_check_loop_iv_data data;
1436
1437 data.declv = declv;
1438 data.fail = false;
1439 data.maybe_nonrect = false;
1440 data.stmt_loc = stmt_loc;
1441 data.lh = lh;
1442 data.ppset = &pset;
1443 data.idx = i;
1444 if (i > 0
1445 && (unsigned) c_omp_is_loop_iterator (decl, &data) < (unsigned) i)
1446 {
1447 error_at (stmt_loc, "the same loop iteration variables %qD used in "
1448 "multiple associated loops", decl);
1449 data.fail = true;
1450 }
1451 if (init)
1452 {
1453 data.expr_loc = EXPR_LOCATION (init);
1454 data.kind = 0;
1455 walk_tree_1 (&init,
1456 c_omp_check_loop_iv_r, &data, NULL, lh);
1457 }
1458 if (cond)
1459 {
1460 gcc_assert (COMPARISON_CLASS_P (cond));
1461 data.expr_loc = EXPR_LOCATION (init);
1462 data.kind = 1;
1463 if (TREE_OPERAND (cond, 0) == decl)
1464 walk_tree_1 (&TREE_OPERAND (cond, 1),
1465 c_omp_check_loop_iv_r, &data, NULL, lh);
1466 else
1467 walk_tree_1 (&TREE_OPERAND (cond, 0),
1468 c_omp_check_loop_iv_r, &data, NULL, lh);
1469 }
1470 return !data.fail;
1471 }
1472
1473 /* This function splits clauses for OpenACC combined loop
1474 constructs. OpenACC combined loop constructs are:
1475 #pragma acc kernels loop
1476 #pragma acc parallel loop */
1477
1478 tree
1479 c_oacc_split_loop_clauses (tree clauses, tree *not_loop_clauses,
1480 bool is_parallel)
1481 {
1482 tree next, loop_clauses, nc;
1483
1484 loop_clauses = *not_loop_clauses = NULL_TREE;
1485 for (; clauses ; clauses = next)
1486 {
1487 next = OMP_CLAUSE_CHAIN (clauses);
1488
1489 switch (OMP_CLAUSE_CODE (clauses))
1490 {
1491 /* Loop clauses. */
1492 case OMP_CLAUSE_COLLAPSE:
1493 case OMP_CLAUSE_TILE:
1494 case OMP_CLAUSE_GANG:
1495 case OMP_CLAUSE_WORKER:
1496 case OMP_CLAUSE_VECTOR:
1497 case OMP_CLAUSE_AUTO:
1498 case OMP_CLAUSE_SEQ:
1499 case OMP_CLAUSE_INDEPENDENT:
1500 case OMP_CLAUSE_PRIVATE:
1501 OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
1502 loop_clauses = clauses;
1503 break;
1504
1505 /* Reductions must be duplicated on both constructs. */
1506 case OMP_CLAUSE_REDUCTION:
1507 if (is_parallel)
1508 {
1509 nc = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1510 OMP_CLAUSE_REDUCTION);
1511 OMP_CLAUSE_DECL (nc) = OMP_CLAUSE_DECL (clauses);
1512 OMP_CLAUSE_REDUCTION_CODE (nc)
1513 = OMP_CLAUSE_REDUCTION_CODE (clauses);
1514 OMP_CLAUSE_CHAIN (nc) = *not_loop_clauses;
1515 *not_loop_clauses = nc;
1516 }
1517
1518 OMP_CLAUSE_CHAIN (clauses) = loop_clauses;
1519 loop_clauses = clauses;
1520 break;
1521
1522 /* Parallel/kernels clauses. */
1523 default:
1524 OMP_CLAUSE_CHAIN (clauses) = *not_loop_clauses;
1525 *not_loop_clauses = clauses;
1526 break;
1527 }
1528 }
1529
1530 return loop_clauses;
1531 }
1532
1533 /* This function attempts to split or duplicate clauses for OpenMP
1534 combined/composite constructs. Right now there are 30 different
1535 constructs. CODE is the innermost construct in the combined construct,
1536 and MASK allows to determine which constructs are combined together,
1537 as every construct has at least one clause that no other construct
1538 has (except for OMP_SECTIONS, but that can be only combined with parallel,
1539 and OMP_MASTER, which doesn't have any clauses at all).
1540 OpenMP combined/composite constructs are:
1541 #pragma omp distribute parallel for
1542 #pragma omp distribute parallel for simd
1543 #pragma omp distribute simd
1544 #pragma omp for simd
1545 #pragma omp master taskloop
1546 #pragma omp master taskloop simd
1547 #pragma omp parallel for
1548 #pragma omp parallel for simd
1549 #pragma omp parallel loop
1550 #pragma omp parallel master
1551 #pragma omp parallel master taskloop
1552 #pragma omp parallel master taskloop simd
1553 #pragma omp parallel sections
1554 #pragma omp target parallel
1555 #pragma omp target parallel for
1556 #pragma omp target parallel for simd
1557 #pragma omp target parallel loop
1558 #pragma omp target teams
1559 #pragma omp target teams distribute
1560 #pragma omp target teams distribute parallel for
1561 #pragma omp target teams distribute parallel for simd
1562 #pragma omp target teams distribute simd
1563 #pragma omp target teams loop
1564 #pragma omp target simd
1565 #pragma omp taskloop simd
1566 #pragma omp teams distribute
1567 #pragma omp teams distribute parallel for
1568 #pragma omp teams distribute parallel for simd
1569 #pragma omp teams distribute simd
1570 #pragma omp teams loop */
1571
1572 void
1573 c_omp_split_clauses (location_t loc, enum tree_code code,
1574 omp_clause_mask mask, tree clauses, tree *cclauses)
1575 {
1576 tree next, c;
1577 enum c_omp_clause_split s;
1578 int i;
1579 bool has_dup_allocate = false;
1580
1581 for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
1582 cclauses[i] = NULL;
1583 /* Add implicit nowait clause on
1584 #pragma omp parallel {for,for simd,sections}. */
1585 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
1586 switch (code)
1587 {
1588 case OMP_FOR:
1589 case OMP_SIMD:
1590 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
1591 cclauses[C_OMP_CLAUSE_SPLIT_FOR]
1592 = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
1593 break;
1594 case OMP_SECTIONS:
1595 cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]
1596 = build_omp_clause (loc, OMP_CLAUSE_NOWAIT);
1597 break;
1598 default:
1599 break;
1600 }
1601
1602 for (; clauses ; clauses = next)
1603 {
1604 next = OMP_CLAUSE_CHAIN (clauses);
1605
1606 switch (OMP_CLAUSE_CODE (clauses))
1607 {
1608 /* First the clauses that are unique to some constructs. */
1609 case OMP_CLAUSE_DEVICE:
1610 case OMP_CLAUSE_MAP:
1611 case OMP_CLAUSE_IS_DEVICE_PTR:
1612 case OMP_CLAUSE_DEFAULTMAP:
1613 case OMP_CLAUSE_DEPEND:
1614 s = C_OMP_CLAUSE_SPLIT_TARGET;
1615 break;
1616 case OMP_CLAUSE_NUM_TEAMS:
1617 case OMP_CLAUSE_THREAD_LIMIT:
1618 s = C_OMP_CLAUSE_SPLIT_TEAMS;
1619 break;
1620 case OMP_CLAUSE_DIST_SCHEDULE:
1621 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1622 break;
1623 case OMP_CLAUSE_COPYIN:
1624 case OMP_CLAUSE_NUM_THREADS:
1625 case OMP_CLAUSE_PROC_BIND:
1626 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1627 break;
1628 case OMP_CLAUSE_ORDERED:
1629 s = C_OMP_CLAUSE_SPLIT_FOR;
1630 break;
1631 case OMP_CLAUSE_SCHEDULE:
1632 s = C_OMP_CLAUSE_SPLIT_FOR;
1633 if (code != OMP_SIMD)
1634 OMP_CLAUSE_SCHEDULE_SIMD (clauses) = 0;
1635 break;
1636 case OMP_CLAUSE_SAFELEN:
1637 case OMP_CLAUSE_SIMDLEN:
1638 case OMP_CLAUSE_ALIGNED:
1639 case OMP_CLAUSE_NONTEMPORAL:
1640 s = C_OMP_CLAUSE_SPLIT_SIMD;
1641 break;
1642 case OMP_CLAUSE_GRAINSIZE:
1643 case OMP_CLAUSE_NUM_TASKS:
1644 case OMP_CLAUSE_FINAL:
1645 case OMP_CLAUSE_UNTIED:
1646 case OMP_CLAUSE_MERGEABLE:
1647 case OMP_CLAUSE_NOGROUP:
1648 case OMP_CLAUSE_PRIORITY:
1649 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1650 break;
1651 case OMP_CLAUSE_BIND:
1652 s = C_OMP_CLAUSE_SPLIT_LOOP;
1653 break;
1654 /* Duplicate this to all of taskloop, distribute, for, simd and
1655 loop. */
1656 case OMP_CLAUSE_COLLAPSE:
1657 if (code == OMP_SIMD)
1658 {
1659 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
1660 | (OMP_CLAUSE_MASK_1
1661 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
1662 | (OMP_CLAUSE_MASK_1
1663 << PRAGMA_OMP_CLAUSE_NOGROUP))) != 0)
1664 {
1665 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1666 OMP_CLAUSE_COLLAPSE);
1667 OMP_CLAUSE_COLLAPSE_EXPR (c)
1668 = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
1669 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
1670 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
1671 }
1672 else
1673 {
1674 /* This must be #pragma omp target simd */
1675 s = C_OMP_CLAUSE_SPLIT_SIMD;
1676 break;
1677 }
1678 }
1679 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
1680 {
1681 if ((mask & (OMP_CLAUSE_MASK_1
1682 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
1683 {
1684 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1685 OMP_CLAUSE_COLLAPSE);
1686 OMP_CLAUSE_COLLAPSE_EXPR (c)
1687 = OMP_CLAUSE_COLLAPSE_EXPR (clauses);
1688 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
1689 cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
1690 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1691 }
1692 else
1693 s = C_OMP_CLAUSE_SPLIT_FOR;
1694 }
1695 else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
1696 != 0)
1697 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1698 else if (code == OMP_LOOP)
1699 s = C_OMP_CLAUSE_SPLIT_LOOP;
1700 else
1701 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1702 break;
1703 /* Private clause is supported on all constructs but master,
1704 it is enough to put it on the innermost one other than master. For
1705 #pragma omp {for,sections} put it on parallel though,
1706 as that's what we did for OpenMP 3.1. */
1707 case OMP_CLAUSE_PRIVATE:
1708 switch (code)
1709 {
1710 case OMP_SIMD: s = C_OMP_CLAUSE_SPLIT_SIMD; break;
1711 case OMP_FOR: case OMP_SECTIONS:
1712 case OMP_PARALLEL: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
1713 case OMP_DISTRIBUTE: s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE; break;
1714 case OMP_TEAMS: s = C_OMP_CLAUSE_SPLIT_TEAMS; break;
1715 case OMP_MASTER: s = C_OMP_CLAUSE_SPLIT_PARALLEL; break;
1716 case OMP_TASKLOOP: s = C_OMP_CLAUSE_SPLIT_TASKLOOP; break;
1717 case OMP_LOOP: s = C_OMP_CLAUSE_SPLIT_LOOP; break;
1718 default: gcc_unreachable ();
1719 }
1720 break;
1721 /* Firstprivate clause is supported on all constructs but
1722 simd, master and loop. Put it on the outermost of those and
1723 duplicate on teams and parallel. */
1724 case OMP_CLAUSE_FIRSTPRIVATE:
1725 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
1726 != 0)
1727 {
1728 if (code == OMP_SIMD
1729 && (mask & ((OMP_CLAUSE_MASK_1
1730 << PRAGMA_OMP_CLAUSE_NUM_THREADS)
1731 | (OMP_CLAUSE_MASK_1
1732 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))) == 0)
1733 {
1734 /* This must be #pragma omp target simd. */
1735 s = C_OMP_CLAUSE_SPLIT_TARGET;
1736 break;
1737 }
1738 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1739 OMP_CLAUSE_FIRSTPRIVATE);
1740 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1741 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
1742 cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
1743 }
1744 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1745 != 0)
1746 {
1747 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)
1748 | (OMP_CLAUSE_MASK_1
1749 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE))) != 0)
1750 {
1751 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1752 OMP_CLAUSE_FIRSTPRIVATE);
1753 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1754 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
1755 cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
1756 if ((mask & (OMP_CLAUSE_MASK_1
1757 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) != 0)
1758 s = C_OMP_CLAUSE_SPLIT_TEAMS;
1759 else
1760 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1761 }
1762 else if ((mask & (OMP_CLAUSE_MASK_1
1763 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
1764 /* This must be
1765 #pragma omp parallel master taskloop{, simd}. */
1766 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1767 else
1768 /* This must be
1769 #pragma omp parallel{, for{, simd}, sections,loop}
1770 or
1771 #pragma omp target parallel. */
1772 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1773 }
1774 else if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
1775 != 0)
1776 {
1777 /* This must be one of
1778 #pragma omp {,target }teams {distribute,loop}
1779 #pragma omp target teams
1780 #pragma omp {,target }teams distribute simd. */
1781 gcc_assert (code == OMP_DISTRIBUTE
1782 || code == OMP_LOOP
1783 || code == OMP_TEAMS
1784 || code == OMP_SIMD);
1785 s = C_OMP_CLAUSE_SPLIT_TEAMS;
1786 }
1787 else if ((mask & (OMP_CLAUSE_MASK_1
1788 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
1789 {
1790 /* This must be #pragma omp distribute simd. */
1791 gcc_assert (code == OMP_SIMD);
1792 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1793 }
1794 else if ((mask & (OMP_CLAUSE_MASK_1
1795 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
1796 {
1797 /* This must be #pragma omp {,{,parallel }master }taskloop simd
1798 or
1799 #pragma omp {,parallel }master taskloop. */
1800 gcc_assert (code == OMP_SIMD || code == OMP_TASKLOOP);
1801 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1802 }
1803 else
1804 {
1805 /* This must be #pragma omp for simd. */
1806 gcc_assert (code == OMP_SIMD);
1807 s = C_OMP_CLAUSE_SPLIT_FOR;
1808 }
1809 break;
1810 /* Lastprivate is allowed on distribute, for, sections, taskloop, loop
1811 and simd. In parallel {for{, simd},sections} we actually want to
1812 put it on parallel rather than for or sections. */
1813 case OMP_CLAUSE_LASTPRIVATE:
1814 if (code == OMP_DISTRIBUTE)
1815 {
1816 s = C_OMP_CLAUSE_SPLIT_DISTRIBUTE;
1817 break;
1818 }
1819 if ((mask & (OMP_CLAUSE_MASK_1
1820 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) != 0)
1821 {
1822 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1823 OMP_CLAUSE_LASTPRIVATE);
1824 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1825 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE];
1826 OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
1827 = OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
1828 cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] = c;
1829 }
1830 if (code == OMP_FOR || code == OMP_SECTIONS)
1831 {
1832 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1833 != 0)
1834 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1835 else
1836 s = C_OMP_CLAUSE_SPLIT_FOR;
1837 break;
1838 }
1839 if (code == OMP_TASKLOOP)
1840 {
1841 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1842 break;
1843 }
1844 if (code == OMP_LOOP)
1845 {
1846 s = C_OMP_CLAUSE_SPLIT_LOOP;
1847 break;
1848 }
1849 gcc_assert (code == OMP_SIMD);
1850 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
1851 {
1852 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1853 OMP_CLAUSE_LASTPRIVATE);
1854 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1855 OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
1856 = OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
1857 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1858 != 0)
1859 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1860 else
1861 s = C_OMP_CLAUSE_SPLIT_FOR;
1862 OMP_CLAUSE_CHAIN (c) = cclauses[s];
1863 cclauses[s] = c;
1864 }
1865 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
1866 {
1867 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1868 OMP_CLAUSE_LASTPRIVATE);
1869 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1870 OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
1871 = OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (clauses);
1872 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
1873 cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
1874 }
1875 s = C_OMP_CLAUSE_SPLIT_SIMD;
1876 break;
1877 /* Shared and default clauses are allowed on parallel, teams and
1878 taskloop. */
1879 case OMP_CLAUSE_SHARED:
1880 case OMP_CLAUSE_DEFAULT:
1881 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
1882 != 0)
1883 {
1884 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1885 != 0)
1886 {
1887 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1888 OMP_CLAUSE_CODE (clauses));
1889 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
1890 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1891 else
1892 OMP_CLAUSE_DEFAULT_KIND (c)
1893 = OMP_CLAUSE_DEFAULT_KIND (clauses);
1894 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL];
1895 cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] = c;
1896 }
1897 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
1898 break;
1899 }
1900 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
1901 != 0)
1902 {
1903 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS))
1904 == 0)
1905 {
1906 s = C_OMP_CLAUSE_SPLIT_TEAMS;
1907 break;
1908 }
1909 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1910 OMP_CLAUSE_CODE (clauses));
1911 if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_SHARED)
1912 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1913 else
1914 OMP_CLAUSE_DEFAULT_KIND (c)
1915 = OMP_CLAUSE_DEFAULT_KIND (clauses);
1916 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
1917 cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
1918 }
1919 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
1920 break;
1921 /* order clauses are allowed on for, simd and loop. */
1922 case OMP_CLAUSE_ORDER:
1923 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
1924 {
1925 if (code == OMP_SIMD)
1926 {
1927 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1928 OMP_CLAUSE_ORDER);
1929 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_FOR];
1930 cclauses[C_OMP_CLAUSE_SPLIT_FOR] = c;
1931 s = C_OMP_CLAUSE_SPLIT_SIMD;
1932 }
1933 else
1934 s = C_OMP_CLAUSE_SPLIT_FOR;
1935 }
1936 else if (code == OMP_LOOP)
1937 s = C_OMP_CLAUSE_SPLIT_LOOP;
1938 else
1939 s = C_OMP_CLAUSE_SPLIT_SIMD;
1940 break;
1941 /* Reduction is allowed on simd, for, parallel, sections, taskloop,
1942 teams and loop. Duplicate it on all of them, but omit on for or
1943 sections if parallel is present (unless inscan, in that case
1944 omit on parallel). If taskloop or loop is combined with
1945 parallel, omit it on parallel. */
1946 case OMP_CLAUSE_REDUCTION:
1947 if (OMP_CLAUSE_REDUCTION_TASK (clauses))
1948 {
1949 if (code == OMP_SIMD || code == OMP_LOOP)
1950 {
1951 error_at (OMP_CLAUSE_LOCATION (clauses),
1952 "invalid %<task%> reduction modifier on construct "
1953 "combined with %<simd%> or %<loop%>");
1954 OMP_CLAUSE_REDUCTION_TASK (clauses) = 0;
1955 }
1956 else if (code != OMP_SECTIONS
1957 && (mask & (OMP_CLAUSE_MASK_1
1958 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0
1959 && (mask & (OMP_CLAUSE_MASK_1
1960 << PRAGMA_OMP_CLAUSE_SCHEDULE)) == 0)
1961 {
1962 error_at (OMP_CLAUSE_LOCATION (clauses),
1963 "invalid %<task%> reduction modifier on construct "
1964 "not combined with %<parallel%>, %<for%> or "
1965 "%<sections%>");
1966 OMP_CLAUSE_REDUCTION_TASK (clauses) = 0;
1967 }
1968 }
1969 if (OMP_CLAUSE_REDUCTION_INSCAN (clauses)
1970 && ((mask & ((OMP_CLAUSE_MASK_1
1971 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)
1972 | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)))
1973 != 0))
1974 {
1975 error_at (OMP_CLAUSE_LOCATION (clauses),
1976 "%<inscan%> %<reduction%> clause on construct other "
1977 "than %<for%>, %<simd%>, %<for simd%>, "
1978 "%<parallel for%>, %<parallel for simd%>");
1979 OMP_CLAUSE_REDUCTION_INSCAN (clauses) = 0;
1980 }
1981 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
1982 {
1983 if (code == OMP_SIMD)
1984 {
1985 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
1986 OMP_CLAUSE_REDUCTION);
1987 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
1988 OMP_CLAUSE_REDUCTION_CODE (c)
1989 = OMP_CLAUSE_REDUCTION_CODE (clauses);
1990 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
1991 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
1992 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
1993 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
1994 OMP_CLAUSE_REDUCTION_INSCAN (c)
1995 = OMP_CLAUSE_REDUCTION_INSCAN (clauses);
1996 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
1997 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
1998 }
1999 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS))
2000 != 0)
2001 {
2002 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
2003 OMP_CLAUSE_REDUCTION);
2004 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
2005 OMP_CLAUSE_REDUCTION_CODE (c)
2006 = OMP_CLAUSE_REDUCTION_CODE (clauses);
2007 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
2008 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
2009 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
2010 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
2011 OMP_CLAUSE_REDUCTION_INSCAN (c)
2012 = OMP_CLAUSE_REDUCTION_INSCAN (clauses);
2013 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS];
2014 cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] = c;
2015 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
2016 }
2017 else if ((mask & (OMP_CLAUSE_MASK_1
2018 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0
2019 && !OMP_CLAUSE_REDUCTION_INSCAN (clauses))
2020 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
2021 else
2022 s = C_OMP_CLAUSE_SPLIT_FOR;
2023 }
2024 else if (code == OMP_SECTIONS
2025 || code == OMP_PARALLEL
2026 || code == OMP_MASTER)
2027 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
2028 else if (code == OMP_TASKLOOP)
2029 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
2030 else if (code == OMP_LOOP)
2031 s = C_OMP_CLAUSE_SPLIT_LOOP;
2032 else if (code == OMP_SIMD)
2033 {
2034 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
2035 != 0)
2036 {
2037 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
2038 OMP_CLAUSE_REDUCTION);
2039 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
2040 OMP_CLAUSE_REDUCTION_CODE (c)
2041 = OMP_CLAUSE_REDUCTION_CODE (clauses);
2042 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
2043 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
2044 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
2045 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
2046 OMP_CLAUSE_REDUCTION_INSCAN (c)
2047 = OMP_CLAUSE_REDUCTION_INSCAN (clauses);
2048 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
2049 cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
2050 }
2051 s = C_OMP_CLAUSE_SPLIT_SIMD;
2052 }
2053 else
2054 s = C_OMP_CLAUSE_SPLIT_TEAMS;
2055 break;
2056 case OMP_CLAUSE_IN_REDUCTION:
2057 /* in_reduction on taskloop simd becomes reduction on the simd
2058 and keeps being in_reduction on taskloop. */
2059 if (code == OMP_SIMD)
2060 {
2061 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
2062 OMP_CLAUSE_REDUCTION);
2063 OMP_CLAUSE_DECL (c) = OMP_CLAUSE_DECL (clauses);
2064 OMP_CLAUSE_REDUCTION_CODE (c)
2065 = OMP_CLAUSE_REDUCTION_CODE (clauses);
2066 OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
2067 = OMP_CLAUSE_REDUCTION_PLACEHOLDER (clauses);
2068 OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c)
2069 = OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (clauses);
2070 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
2071 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
2072 }
2073 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
2074 break;
2075 case OMP_CLAUSE_IF:
2076 if (OMP_CLAUSE_IF_MODIFIER (clauses) != ERROR_MARK)
2077 {
2078 s = C_OMP_CLAUSE_SPLIT_COUNT;
2079 switch (OMP_CLAUSE_IF_MODIFIER (clauses))
2080 {
2081 case OMP_PARALLEL:
2082 if ((mask & (OMP_CLAUSE_MASK_1
2083 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
2084 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
2085 break;
2086 case OMP_SIMD:
2087 if (code == OMP_SIMD)
2088 s = C_OMP_CLAUSE_SPLIT_SIMD;
2089 break;
2090 case OMP_TASKLOOP:
2091 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
2092 != 0)
2093 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
2094 break;
2095 case OMP_TARGET:
2096 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
2097 != 0)
2098 s = C_OMP_CLAUSE_SPLIT_TARGET;
2099 break;
2100 default:
2101 break;
2102 }
2103 if (s != C_OMP_CLAUSE_SPLIT_COUNT)
2104 break;
2105 /* Error-recovery here, invalid if-modifier specified, add the
2106 clause to just one construct. */
2107 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) != 0)
2108 s = C_OMP_CLAUSE_SPLIT_TARGET;
2109 else if ((mask & (OMP_CLAUSE_MASK_1
2110 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
2111 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
2112 else if ((mask & (OMP_CLAUSE_MASK_1
2113 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
2114 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
2115 else if (code == OMP_SIMD)
2116 s = C_OMP_CLAUSE_SPLIT_SIMD;
2117 else
2118 gcc_unreachable ();
2119 break;
2120 }
2121 /* Otherwise, duplicate if clause to all constructs. */
2122 if (code == OMP_SIMD)
2123 {
2124 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)
2125 | (OMP_CLAUSE_MASK_1
2126 << PRAGMA_OMP_CLAUSE_NUM_THREADS)
2127 | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP)))
2128 != 0)
2129 {
2130 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
2131 OMP_CLAUSE_IF);
2132 OMP_CLAUSE_IF_MODIFIER (c)
2133 = OMP_CLAUSE_IF_MODIFIER (clauses);
2134 OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
2135 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_SIMD];
2136 cclauses[C_OMP_CLAUSE_SPLIT_SIMD] = c;
2137 }
2138 else
2139 {
2140 s = C_OMP_CLAUSE_SPLIT_SIMD;
2141 break;
2142 }
2143 }
2144 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))
2145 != 0)
2146 {
2147 if ((mask & (OMP_CLAUSE_MASK_1
2148 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
2149 {
2150 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
2151 OMP_CLAUSE_IF);
2152 OMP_CLAUSE_IF_MODIFIER (c)
2153 = OMP_CLAUSE_IF_MODIFIER (clauses);
2154 OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
2155 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP];
2156 cclauses[C_OMP_CLAUSE_SPLIT_TASKLOOP] = c;
2157 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
2158 }
2159 else
2160 s = C_OMP_CLAUSE_SPLIT_TASKLOOP;
2161 }
2162 else if ((mask & (OMP_CLAUSE_MASK_1
2163 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) != 0)
2164 {
2165 if ((mask & (OMP_CLAUSE_MASK_1
2166 << PRAGMA_OMP_CLAUSE_MAP)) != 0)
2167 {
2168 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
2169 OMP_CLAUSE_IF);
2170 OMP_CLAUSE_IF_MODIFIER (c)
2171 = OMP_CLAUSE_IF_MODIFIER (clauses);
2172 OMP_CLAUSE_IF_EXPR (c) = OMP_CLAUSE_IF_EXPR (clauses);
2173 OMP_CLAUSE_CHAIN (c) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
2174 cclauses[C_OMP_CLAUSE_SPLIT_TARGET] = c;
2175 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
2176 }
2177 else
2178 s = C_OMP_CLAUSE_SPLIT_PARALLEL;
2179 }
2180 else
2181 s = C_OMP_CLAUSE_SPLIT_TARGET;
2182 break;
2183 case OMP_CLAUSE_LINEAR:
2184 /* Linear clause is allowed on simd and for. Put it on the
2185 innermost construct. */
2186 if (code == OMP_SIMD)
2187 s = C_OMP_CLAUSE_SPLIT_SIMD;
2188 else
2189 s = C_OMP_CLAUSE_SPLIT_FOR;
2190 break;
2191 case OMP_CLAUSE_NOWAIT:
2192 /* Nowait clause is allowed on target, for and sections, but
2193 is not allowed on parallel for or parallel sections. Therefore,
2194 put it on target construct if present, because that can only
2195 be combined with parallel for{, simd} and not with for{, simd},
2196 otherwise to the worksharing construct. */
2197 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP))
2198 != 0)
2199 s = C_OMP_CLAUSE_SPLIT_TARGET;
2200 else
2201 s = C_OMP_CLAUSE_SPLIT_FOR;
2202 break;
2203 /* Allocate clause is allowed on target, teams, distribute, parallel,
2204 for, sections and taskloop. Distribute it to all. */
2205 case OMP_CLAUSE_ALLOCATE:
2206 s = C_OMP_CLAUSE_SPLIT_COUNT;
2207 for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
2208 {
2209 switch (i)
2210 {
2211 case C_OMP_CLAUSE_SPLIT_TARGET:
2212 if ((mask & (OMP_CLAUSE_MASK_1
2213 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
2214 continue;
2215 break;
2216 case C_OMP_CLAUSE_SPLIT_TEAMS:
2217 if ((mask & (OMP_CLAUSE_MASK_1
2218 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
2219 continue;
2220 break;
2221 case C_OMP_CLAUSE_SPLIT_DISTRIBUTE:
2222 if ((mask & (OMP_CLAUSE_MASK_1
2223 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
2224 continue;
2225 break;
2226 case C_OMP_CLAUSE_SPLIT_PARALLEL:
2227 if ((mask & (OMP_CLAUSE_MASK_1
2228 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
2229 continue;
2230 break;
2231 case C_OMP_CLAUSE_SPLIT_FOR:
2232 STATIC_ASSERT (C_OMP_CLAUSE_SPLIT_SECTIONS
2233 == C_OMP_CLAUSE_SPLIT_FOR
2234 && (C_OMP_CLAUSE_SPLIT_TASKLOOP
2235 == C_OMP_CLAUSE_SPLIT_FOR)
2236 && (C_OMP_CLAUSE_SPLIT_LOOP
2237 == C_OMP_CLAUSE_SPLIT_FOR));
2238 if (code == OMP_SECTIONS)
2239 break;
2240 if ((mask & (OMP_CLAUSE_MASK_1
2241 << PRAGMA_OMP_CLAUSE_SCHEDULE)) != 0)
2242 break;
2243 if ((mask & (OMP_CLAUSE_MASK_1
2244 << PRAGMA_OMP_CLAUSE_NOGROUP)) != 0)
2245 break;
2246 continue;
2247 case C_OMP_CLAUSE_SPLIT_SIMD:
2248 continue;
2249 default:
2250 gcc_unreachable ();
2251 }
2252 if (s != C_OMP_CLAUSE_SPLIT_COUNT)
2253 {
2254 c = build_omp_clause (OMP_CLAUSE_LOCATION (clauses),
2255 OMP_CLAUSE_ALLOCATE);
2256 OMP_CLAUSE_DECL (c)
2257 = OMP_CLAUSE_DECL (clauses);
2258 OMP_CLAUSE_ALLOCATE_ALLOCATOR (c)
2259 = OMP_CLAUSE_ALLOCATE_ALLOCATOR (clauses);
2260 OMP_CLAUSE_CHAIN (c) = cclauses[s];
2261 cclauses[s] = c;
2262 has_dup_allocate = true;
2263 }
2264 s = (enum c_omp_clause_split) i;
2265 }
2266 gcc_assert (s != C_OMP_CLAUSE_SPLIT_COUNT);
2267 break;
2268 default:
2269 gcc_unreachable ();
2270 }
2271 OMP_CLAUSE_CHAIN (clauses) = cclauses[s];
2272 cclauses[s] = clauses;
2273 }
2274
2275 if (has_dup_allocate)
2276 {
2277 bool need_prune = false;
2278 bitmap_obstack_initialize (NULL);
2279 for (i = 0; i < C_OMP_CLAUSE_SPLIT_SIMD - (code == OMP_LOOP); i++)
2280 if (cclauses[i])
2281 {
2282 bitmap_head allocate_head;
2283 bitmap_initialize (&allocate_head, &bitmap_default_obstack);
2284 for (c = cclauses[i]; c; c = OMP_CLAUSE_CHAIN (c))
2285 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ALLOCATE
2286 && DECL_P (OMP_CLAUSE_DECL (c)))
2287 bitmap_set_bit (&allocate_head,
2288 DECL_UID (OMP_CLAUSE_DECL (c)));
2289 for (c = cclauses[i]; c; c = OMP_CLAUSE_CHAIN (c))
2290 switch (OMP_CLAUSE_CODE (c))
2291 {
2292 case OMP_CLAUSE_REDUCTION:
2293 case OMP_CLAUSE_IN_REDUCTION:
2294 case OMP_CLAUSE_TASK_REDUCTION:
2295 if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
2296 {
2297 tree t = TREE_OPERAND (OMP_CLAUSE_DECL (c), 0);
2298 if (TREE_CODE (t) == POINTER_PLUS_EXPR)
2299 t = TREE_OPERAND (t, 0);
2300 if (TREE_CODE (t) == ADDR_EXPR
2301 || TREE_CODE (t) == INDIRECT_REF)
2302 t = TREE_OPERAND (t, 0);
2303 if (DECL_P (t))
2304 bitmap_clear_bit (&allocate_head, DECL_UID (t));
2305 break;
2306 }
2307 else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == TREE_LIST)
2308 {
2309 tree t;
2310 for (t = OMP_CLAUSE_DECL (c);
2311 TREE_CODE (t) == TREE_LIST; t = TREE_CHAIN (t))
2312 ;
2313 if (DECL_P (t))
2314 bitmap_clear_bit (&allocate_head, DECL_UID (t));
2315 break;
2316 }
2317 /* FALLTHRU */
2318 case OMP_CLAUSE_PRIVATE:
2319 case OMP_CLAUSE_FIRSTPRIVATE:
2320 case OMP_CLAUSE_LASTPRIVATE:
2321 case OMP_CLAUSE_LINEAR:
2322 if (DECL_P (OMP_CLAUSE_DECL (c)))
2323 bitmap_clear_bit (&allocate_head,
2324 DECL_UID (OMP_CLAUSE_DECL (c)));
2325 break;
2326 default:
2327 break;
2328 }
2329 for (c = cclauses[i]; c; c = OMP_CLAUSE_CHAIN (c))
2330 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ALLOCATE
2331 && DECL_P (OMP_CLAUSE_DECL (c))
2332 && bitmap_bit_p (&allocate_head,
2333 DECL_UID (OMP_CLAUSE_DECL (c))))
2334 {
2335 /* Mark allocate clauses which don't have corresponding
2336 explicit data sharing clause. */
2337 OMP_CLAUSE_ALLOCATE_COMBINED (c) = 1;
2338 need_prune = true;
2339 }
2340 }
2341 bitmap_obstack_release (NULL);
2342 if (need_prune)
2343 {
2344 /* At least one allocate clause has been marked. Walk all the
2345 duplicated allocate clauses in sync. If it is marked in all
2346 constituent constructs, diagnose it as invalid and remove
2347 them. Otherwise, remove all marked inner clauses inside
2348 a construct that doesn't have them marked. Keep the outer
2349 marked ones, because some clause duplication is done only
2350 during gimplification. */
2351 tree *p[C_OMP_CLAUSE_SPLIT_COUNT];
2352 for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
2353 if (cclauses[i] == NULL_TREE
2354 || i == C_OMP_CLAUSE_SPLIT_SIMD
2355 || (i == C_OMP_CLAUSE_SPLIT_LOOP && code == OMP_LOOP))
2356 p[i] = NULL;
2357 else
2358 p[i] = &cclauses[i];
2359 do
2360 {
2361 int j = -1;
2362 tree seen = NULL_TREE;
2363 for (i = C_OMP_CLAUSE_SPLIT_COUNT - 1; i >= 0; i--)
2364 if (p[i])
2365 {
2366 while (*p[i]
2367 && OMP_CLAUSE_CODE (*p[i]) != OMP_CLAUSE_ALLOCATE)
2368 p[i] = &OMP_CLAUSE_CHAIN (*p[i]);
2369 if (*p[i] == NULL_TREE)
2370 {
2371 i = C_OMP_CLAUSE_SPLIT_COUNT;
2372 break;
2373 }
2374 if (!OMP_CLAUSE_ALLOCATE_COMBINED (*p[i]) && j == -1)
2375 j = i;
2376 seen = *p[i];
2377 }
2378 if (i == C_OMP_CLAUSE_SPLIT_COUNT)
2379 break;
2380 if (j == -1)
2381 error_at (OMP_CLAUSE_LOCATION (seen),
2382 "%qD specified in %<allocate%> clause but not in "
2383 "an explicit privatization clause",
2384 OMP_CLAUSE_DECL (seen));
2385 for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++)
2386 if (p[i])
2387 {
2388 if (i > j)
2389 /* Remove. */
2390 *p[i] = OMP_CLAUSE_CHAIN (*p[i]);
2391 else
2392 /* Keep. */
2393 p[i] = &OMP_CLAUSE_CHAIN (*p[i]);
2394 }
2395 }
2396 while (1);
2397 }
2398 }
2399
2400 if (!flag_checking)
2401 return;
2402
2403 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)) == 0)
2404 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TARGET] == NULL_TREE);
2405 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS)) == 0)
2406 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_TEAMS] == NULL_TREE);
2407 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)) == 0)
2408 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE] == NULL_TREE);
2409 if ((mask & (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS)) == 0)
2410 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] == NULL_TREE);
2411 if ((mask & ((OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE)
2412 | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOGROUP))) == 0
2413 && code != OMP_SECTIONS
2414 && code != OMP_LOOP)
2415 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_FOR] == NULL_TREE);
2416 if (code != OMP_SIMD)
2417 gcc_assert (cclauses[C_OMP_CLAUSE_SPLIT_SIMD] == NULL_TREE);
2418 }
2419
2420
2421 /* qsort callback to compare #pragma omp declare simd clauses. */
2422
2423 static int
2424 c_omp_declare_simd_clause_cmp (const void *p, const void *q)
2425 {
2426 tree a = *(const tree *) p;
2427 tree b = *(const tree *) q;
2428 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_CODE (b))
2429 {
2430 if (OMP_CLAUSE_CODE (a) > OMP_CLAUSE_CODE (b))
2431 return -1;
2432 return 1;
2433 }
2434 if (OMP_CLAUSE_CODE (a) != OMP_CLAUSE_SIMDLEN
2435 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_INBRANCH
2436 && OMP_CLAUSE_CODE (a) != OMP_CLAUSE_NOTINBRANCH)
2437 {
2438 int c = tree_to_shwi (OMP_CLAUSE_DECL (a));
2439 int d = tree_to_shwi (OMP_CLAUSE_DECL (b));
2440 if (c < d)
2441 return 1;
2442 if (c > d)
2443 return -1;
2444 }
2445 return 0;
2446 }
2447
2448 /* Change PARM_DECLs in OMP_CLAUSE_DECL of #pragma omp declare simd
2449 CLAUSES on FNDECL into argument indexes and sort them. */
2450
2451 tree
2452 c_omp_declare_simd_clauses_to_numbers (tree parms, tree clauses)
2453 {
2454 tree c;
2455 vec<tree> clvec = vNULL;
2456
2457 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2458 {
2459 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
2460 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
2461 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
2462 {
2463 tree decl = OMP_CLAUSE_DECL (c);
2464 tree arg;
2465 int idx;
2466 for (arg = parms, idx = 0; arg;
2467 arg = TREE_CHAIN (arg), idx++)
2468 if (arg == decl)
2469 break;
2470 if (arg == NULL_TREE)
2471 {
2472 error_at (OMP_CLAUSE_LOCATION (c),
2473 "%qD is not a function argument", decl);
2474 continue;
2475 }
2476 OMP_CLAUSE_DECL (c) = build_int_cst (integer_type_node, idx);
2477 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2478 && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
2479 {
2480 decl = OMP_CLAUSE_LINEAR_STEP (c);
2481 for (arg = parms, idx = 0; arg;
2482 arg = TREE_CHAIN (arg), idx++)
2483 if (arg == decl)
2484 break;
2485 if (arg == NULL_TREE)
2486 {
2487 error_at (OMP_CLAUSE_LOCATION (c),
2488 "%qD is not a function argument", decl);
2489 continue;
2490 }
2491 OMP_CLAUSE_LINEAR_STEP (c)
2492 = build_int_cst (integer_type_node, idx);
2493 }
2494 }
2495 clvec.safe_push (c);
2496 }
2497 if (!clvec.is_empty ())
2498 {
2499 unsigned int len = clvec.length (), i;
2500 clvec.qsort (c_omp_declare_simd_clause_cmp);
2501 clauses = clvec[0];
2502 for (i = 0; i < len; i++)
2503 OMP_CLAUSE_CHAIN (clvec[i]) = (i < len - 1) ? clvec[i + 1] : NULL_TREE;
2504 }
2505 else
2506 clauses = NULL_TREE;
2507 clvec.release ();
2508 return clauses;
2509 }
2510
2511 /* Change argument indexes in CLAUSES of FNDECL back to PARM_DECLs. */
2512
2513 void
2514 c_omp_declare_simd_clauses_to_decls (tree fndecl, tree clauses)
2515 {
2516 tree c;
2517
2518 for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2519 if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SIMDLEN
2520 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_INBRANCH
2521 && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_NOTINBRANCH)
2522 {
2523 int idx = tree_to_shwi (OMP_CLAUSE_DECL (c)), i;
2524 tree arg;
2525 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
2526 arg = TREE_CHAIN (arg), i++)
2527 if (i == idx)
2528 break;
2529 gcc_assert (arg);
2530 OMP_CLAUSE_DECL (c) = arg;
2531 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
2532 && OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c))
2533 {
2534 idx = tree_to_shwi (OMP_CLAUSE_LINEAR_STEP (c));
2535 for (arg = DECL_ARGUMENTS (fndecl), i = 0; arg;
2536 arg = TREE_CHAIN (arg), i++)
2537 if (i == idx)
2538 break;
2539 gcc_assert (arg);
2540 OMP_CLAUSE_LINEAR_STEP (c) = arg;
2541 }
2542 }
2543 }
2544
2545 /* Return true for __func__ and similar function-local predefined
2546 variables (which are in OpenMP predetermined shared, allowed in
2547 shared/firstprivate clauses). */
2548
2549 bool
2550 c_omp_predefined_variable (tree decl)
2551 {
2552 if (VAR_P (decl)
2553 && DECL_ARTIFICIAL (decl)
2554 && TREE_READONLY (decl)
2555 && TREE_STATIC (decl)
2556 && DECL_NAME (decl)
2557 && (DECL_NAME (decl) == ridpointers[RID_C99_FUNCTION_NAME]
2558 || DECL_NAME (decl) == ridpointers[RID_FUNCTION_NAME]
2559 || DECL_NAME (decl) == ridpointers[RID_PRETTY_FUNCTION_NAME]))
2560 return true;
2561 return false;
2562 }
2563
2564 /* OMP_CLAUSE_DEFAULT_UNSPECIFIED unless OpenMP sharing attribute of DECL
2565 is predetermined. */
2566
2567 enum omp_clause_default_kind
2568 c_omp_predetermined_sharing (tree decl)
2569 {
2570 /* Predetermine artificial variables holding integral values, those
2571 are usually result of gimplify_one_sizepos or SAVE_EXPR
2572 gimplification. */
2573 if (VAR_P (decl)
2574 && DECL_ARTIFICIAL (decl)
2575 && INTEGRAL_TYPE_P (TREE_TYPE (decl)))
2576 return OMP_CLAUSE_DEFAULT_SHARED;
2577
2578 if (c_omp_predefined_variable (decl))
2579 return OMP_CLAUSE_DEFAULT_SHARED;
2580
2581 return OMP_CLAUSE_DEFAULT_UNSPECIFIED;
2582 }
2583
2584 /* OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED unless OpenMP mapping attribute
2585 of DECL is predetermined. */
2586
2587 enum omp_clause_defaultmap_kind
2588 c_omp_predetermined_mapping (tree decl)
2589 {
2590 /* Predetermine artificial variables holding integral values, those
2591 are usually result of gimplify_one_sizepos or SAVE_EXPR
2592 gimplification. */
2593 if (VAR_P (decl)
2594 && DECL_ARTIFICIAL (decl)
2595 && INTEGRAL_TYPE_P (TREE_TYPE (decl)))
2596 return OMP_CLAUSE_DEFAULTMAP_FIRSTPRIVATE;
2597
2598 if (c_omp_predefined_variable (decl))
2599 return OMP_CLAUSE_DEFAULTMAP_TO;
2600
2601 return OMP_CLAUSE_DEFAULTMAP_CATEGORY_UNSPECIFIED;
2602 }
2603
2604
/* Diagnose errors in an OpenMP context selector, return CTX if
   it is correct or error_mark_node otherwise.  CTX is a TREE_LIST of
   selector sets; each set's TREE_VALUE is a TREE_LIST of selectors,
   and each selector's TREE_VALUE is its property list.  Duplicate sets
   or duplicate selectors within a set are hard errors; unknown
   properties of known selectors are only warned about, except for
   atomic_default_mem_order where they are errors.  */

tree
c_omp_check_context_selector (location_t loc, tree ctx)
{
  /* Each trait-set-selector-name can only be specified once.
     There are just 4 set names.  */
  for (tree t1 = ctx; t1; t1 = TREE_CHAIN (t1))
    for (tree t2 = TREE_CHAIN (t1); t2; t2 = TREE_CHAIN (t2))
      if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
	{
	  error_at (loc, "selector set %qs specified more than once",
		    IDENTIFIER_POINTER (TREE_PURPOSE (t1)));
	  return error_mark_node;
	}
  for (tree t = ctx; t; t = TREE_CHAIN (t))
    {
      /* Each trait-selector-name can only be specified once.  For short
	 lists a quadratic scan is cheaper than building a hash set.  */
      if (list_length (TREE_VALUE (t)) < 5)
	{
	  for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
	    for (tree t2 = TREE_CHAIN (t1); t2; t2 = TREE_CHAIN (t2))
	      if (TREE_PURPOSE (t1) == TREE_PURPOSE (t2))
		{
		  error_at (loc,
			    "selector %qs specified more than once in set %qs",
			    IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
			    IDENTIFIER_POINTER (TREE_PURPOSE (t)));
		  return error_mark_node;
		}
	}
      else
	{
	  /* Longer lists: detect duplicates with a hash set instead.  */
	  hash_set<tree> pset;
	  for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
	    if (pset.add (TREE_PURPOSE (t1)))
	      {
		error_at (loc,
			  "selector %qs specified more than once in set %qs",
			  IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
			  IDENTIFIER_POINTER (TREE_PURPOSE (t)));
		return error_mark_node;
	      }
	}

      /* NULL-terminated tables of the property names known for each
	 (set, selector) pair below.  "extension" knows no properties,
	 so any extension property hits the unknown-property path.  */
      static const char *const kind[] = {
	"host", "nohost", "cpu", "gpu", "fpga", "any", NULL };
      static const char *const vendor[] = {
	"amd", "arm", "bsc", "cray", "fujitsu", "gnu", "ibm", "intel",
	"llvm", "nvidia", "pgi", "ti", "unknown", NULL };
      static const char *const extension[] = { NULL };
      static const char *const atomic_default_mem_order[] = {
	"seq_cst", "relaxed", "acq_rel", NULL };
      struct known_properties { const char *set; const char *selector;
				const char *const *props; };
      known_properties props[] = {
	{ "device", "kind", kind },
	{ "implementation", "vendor", vendor },
	{ "implementation", "extension", extension },
	{ "implementation", "atomic_default_mem_order",
	  atomic_default_mem_order } };
      /* Validate each property of a known selector against its table.  */
      for (tree t1 = TREE_VALUE (t); t1; t1 = TREE_CHAIN (t1))
	for (unsigned i = 0; i < ARRAY_SIZE (props); i++)
	  if (!strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t1)),
		       props[i].selector)
	      && !strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t)),
			  props[i].set))
	    for (tree t2 = TREE_VALUE (t1); t2; t2 = TREE_CHAIN (t2))
	      for (unsigned j = 0; ; j++)
		{
		  if (props[i].props[j] == NULL)
		    {
		      /* Ran off the end of the table without a match.
			 The internal " score" entry is always OK.  */
		      if (TREE_PURPOSE (t2)
			  && !strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
				      " score"))
			break;
		      if (props[i].props == atomic_default_mem_order)
			{
			  /* Hard error: the spec enumerates the only
			     valid memory orders.  */
			  error_at (loc,
				    "incorrect property %qs of %qs selector",
				    IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
				    "atomic_default_mem_order");
			  return error_mark_node;
			}
		      else if (TREE_PURPOSE (t2))
			warning_at (loc, 0,
				    "unknown property %qs of %qs selector",
				    IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
				    props[i].selector);
		      else
			warning_at (loc, 0,
				    "unknown property %qE of %qs selector",
				    TREE_VALUE (t2), props[i].selector);
		      break;
		    }
		  else if (TREE_PURPOSE (t2) == NULL_TREE)
		    {
		      /* Property given as a string literal: compare the
			 full string including the implicit NUL length.  */
		      const char *str = TREE_STRING_POINTER (TREE_VALUE (t2));
		      if (!strcmp (str, props[i].props[j])
			  && ((size_t) TREE_STRING_LENGTH (TREE_VALUE (t2))
			      == strlen (str) + 1))
			break;
		    }
		  else if (!strcmp (IDENTIFIER_POINTER (TREE_PURPOSE (t2)),
				    props[i].props[j]))
		    break;
		}
    }
  return ctx;
}
2716
2717 /* Register VARIANT as variant of some base function marked with
2718 #pragma omp declare variant. CONSTRUCT is corresponding construct
2719 selector set. */
2720
2721 void
2722 c_omp_mark_declare_variant (location_t loc, tree variant, tree construct)
2723 {
2724 tree attr = lookup_attribute ("omp declare variant variant",
2725 DECL_ATTRIBUTES (variant));
2726 if (attr == NULL_TREE)
2727 {
2728 attr = tree_cons (get_identifier ("omp declare variant variant"),
2729 unshare_expr (construct),
2730 DECL_ATTRIBUTES (variant));
2731 DECL_ATTRIBUTES (variant) = attr;
2732 return;
2733 }
2734 if ((TREE_VALUE (attr) != NULL_TREE) != (construct != NULL_TREE)
2735 || (construct != NULL_TREE
2736 && omp_context_selector_set_compare ("construct", TREE_VALUE (attr),
2737 construct)))
2738 error_at (loc, "%qD used as a variant with incompatible %<construct%> "
2739 "selector sets", variant);
2740 }
2741
2742 /* For OpenACC, the OMP_CLAUSE_MAP_KIND of an OMP_CLAUSE_MAP is used internally
2743 to distinguish clauses as seen by the user. Return the "friendly" clause
2744 name for error messages etc., where possible. See also
2745 c/c-parser.c:c_parser_oacc_data_clause and
2746 cp/parser.c:cp_parser_oacc_data_clause. */
2747
2748 const char *
2749 c_omp_map_clause_name (tree clause, bool oacc)
2750 {
2751 if (oacc && OMP_CLAUSE_CODE (clause) == OMP_CLAUSE_MAP)
2752 switch (OMP_CLAUSE_MAP_KIND (clause))
2753 {
2754 case GOMP_MAP_FORCE_ALLOC:
2755 case GOMP_MAP_ALLOC: return "create";
2756 case GOMP_MAP_FORCE_TO:
2757 case GOMP_MAP_TO: return "copyin";
2758 case GOMP_MAP_FORCE_FROM:
2759 case GOMP_MAP_FROM: return "copyout";
2760 case GOMP_MAP_FORCE_TOFROM:
2761 case GOMP_MAP_TOFROM: return "copy";
2762 case GOMP_MAP_RELEASE: return "delete";
2763 case GOMP_MAP_FORCE_PRESENT: return "present";
2764 case GOMP_MAP_ATTACH: return "attach";
2765 case GOMP_MAP_FORCE_DETACH:
2766 case GOMP_MAP_DETACH: return "detach";
2767 case GOMP_MAP_DEVICE_RESIDENT: return "device_resident";
2768 case GOMP_MAP_LINK: return "link";
2769 case GOMP_MAP_FORCE_DEVICEPTR: return "deviceptr";
2770 default: break;
2771 }
2772 return omp_clause_code_name[OMP_CLAUSE_CODE (clause)];
2773 }
2774
2775 /* Used to merge map clause information in c_omp_adjust_map_clauses. */
2776 struct map_clause
2777 {
2778 tree clause;
2779 bool firstprivate_ptr_p;
2780 bool decl_mapped;
2781 bool omp_declare_target;
2782 map_clause (void) : clause (NULL_TREE), firstprivate_ptr_p (false),
2783 decl_mapped (false), omp_declare_target (false) { }
2784 };
2785
2786 /* Adjust map clauses after normal clause parsing, mainly to turn specific
2787 base-pointer map cases into attach/detach and mark them addressable. */
2788 void
2789 c_omp_adjust_map_clauses (tree clauses, bool is_target)
2790 {
2791 if (!is_target)
2792 {
2793 /* If this is not a target construct, just turn firstprivate pointers
2794 into attach/detach, the runtime will check and do the rest. */
2795
2796 for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2797 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
2798 && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
2799 && DECL_P (OMP_CLAUSE_DECL (c))
2800 && POINTER_TYPE_P (TREE_TYPE (OMP_CLAUSE_DECL (c))))
2801 {
2802 tree ptr = OMP_CLAUSE_DECL (c);
2803 OMP_CLAUSE_SET_MAP_KIND (c, GOMP_MAP_ATTACH_DETACH);
2804 c_common_mark_addressable_vec (ptr);
2805 }
2806 return;
2807 }
2808
2809 hash_map<tree, map_clause> maps;
2810
2811 for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
2812 if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
2813 && DECL_P (OMP_CLAUSE_DECL (c)))
2814 {
2815 /* If this is for a target construct, the firstprivate pointer
2816 is changed to attach/detach if either is true:
2817 (1) the base-pointer is mapped in this same construct, or
2818 (2) the base-pointer is a variable place on the device by
2819 "declare target" directives.
2820
2821 Here we iterate through all map clauses collecting these cases,
2822 and merge them with a hash_map to process below. */
2823
2824 if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
2825 && POINTER_TYPE_P (TREE_TYPE (OMP_CLAUSE_DECL (c))))
2826 {
2827 tree ptr = OMP_CLAUSE_DECL (c);
2828 map_clause &mc = maps.get_or_insert (ptr);
2829 if (mc.clause == NULL_TREE)
2830 mc.clause = c;
2831 mc.firstprivate_ptr_p = true;
2832
2833 if (is_global_var (ptr)
2834 && lookup_attribute ("omp declare target",
2835 DECL_ATTRIBUTES (ptr)))
2836 mc.omp_declare_target = true;
2837 }
2838 else if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALLOC
2839 || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_TO
2840 || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FROM
2841 || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_TOFROM
2842 || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_TO
2843 || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_FROM
2844 || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_TOFROM)
2845 {
2846 map_clause &mc = maps.get_or_insert (OMP_CLAUSE_DECL (c));
2847 mc.decl_mapped = true;
2848 }
2849 }
2850
2851 for (hash_map<tree, map_clause>::iterator i = maps.begin ();
2852 i != maps.end (); ++i)
2853 {
2854 map_clause &mc = (*i).second;
2855
2856 if (mc.firstprivate_ptr_p
2857 && (mc.decl_mapped || mc.omp_declare_target))
2858 {
2859 OMP_CLAUSE_SET_MAP_KIND (mc.clause, GOMP_MAP_ATTACH_DETACH);
2860 c_common_mark_addressable_vec (OMP_CLAUSE_DECL (mc.clause));
2861 }
2862 }
2863 }