[omp] Move NE_EXPR handling to omp_adjust_for_condition
[gcc.git] gcc/omp-general.c
1 /* General types and functions that are useful for processing of OpenMP,
2 OpenACC and similar directives at various stages of compilation.
3
4 Copyright (C) 2005-2019 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* Find an OMP clause of type KIND within CLAUSES (see omp_find_clause below). */
23
24 #include "config.h"
25 #include "system.h"
26 #include "coretypes.h"
27 #include "backend.h"
28 #include "target.h"
29 #include "tree.h"
30 #include "gimple.h"
31 #include "ssa.h"
32 #include "diagnostic-core.h"
33 #include "fold-const.h"
34 #include "langhooks.h"
35 #include "omp-general.h"
36 #include "stringpool.h"
37 #include "attribs.h"
38
39 enum omp_requires omp_requires_mask;
40
41 tree
42 omp_find_clause (tree clauses, enum omp_clause_code kind)
43 {
44 for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
45 if (OMP_CLAUSE_CODE (clauses) == kind)
46 return clauses;
47
48 return NULL_TREE;
49 }
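/* Usage sketch (hypothetical caller, not part of this file): the lookup
   returns only the first clause of the requested code, walking the
   OMP_CLAUSE_CHAIN links, e.g.

     tree c = omp_find_clause (gimple_omp_for_clauses (for_stmt),
                               OMP_CLAUSE_SCHEDULE);
     if (c)
       chunk = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c);

   Callers that need every clause of a kind restart the search from
   OMP_CLAUSE_CHAIN of the previous result.  */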
50
51 /* Return true if DECL is of reference type (privatized by reference). */
52
53 bool
54 omp_is_reference (tree decl)
55 {
56 return lang_hooks.decls.omp_privatize_by_reference (decl);
57 }
58
59 /* Adjust *COND_CODE and *N2 so that the former is either LT_EXPR or GT_EXPR,
60 given that V is the loop index variable and STEP is the loop step. */
61
62 void
63 omp_adjust_for_condition (location_t loc, enum tree_code *cond_code, tree *n2,
64 tree v, tree step)
65 {
66 switch (*cond_code)
67 {
68 case LT_EXPR:
69 case GT_EXPR:
70 break;
71
72 case NE_EXPR:
73 gcc_assert (TREE_CODE (step) == INTEGER_CST);
74 if (TREE_CODE (TREE_TYPE (v)) == INTEGER_TYPE)
75 {
76 if (integer_onep (step))
77 *cond_code = LT_EXPR;
78 else
79 {
80 gcc_assert (integer_minus_onep (step));
81 *cond_code = GT_EXPR;
82 }
83 }
84 else
85 {
86 tree unit = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (v)));
87 gcc_assert (TREE_CODE (unit) == INTEGER_CST);
88 if (tree_int_cst_equal (unit, step))
89 *cond_code = LT_EXPR;
90 else
91 {
92 gcc_assert (wi::neg (wi::to_widest (unit))
93 == wi::to_widest (step));
94 *cond_code = GT_EXPR;
95 }
96 }
97
98 break;
99
100 case LE_EXPR:
101 if (POINTER_TYPE_P (TREE_TYPE (*n2)))
102 *n2 = fold_build_pointer_plus_hwi_loc (loc, *n2, 1);
103 else
104 *n2 = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (*n2), *n2,
105 build_int_cst (TREE_TYPE (*n2), 1));
106 *cond_code = LT_EXPR;
107 break;
108 case GE_EXPR:
109 if (POINTER_TYPE_P (TREE_TYPE (*n2)))
110 *n2 = fold_build_pointer_plus_hwi_loc (loc, *n2, -1);
111 else
112 *n2 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (*n2), *n2,
113 build_int_cst (TREE_TYPE (*n2), 1));
114 *cond_code = GT_EXPR;
115 break;
116 default:
117 gcc_unreachable ();
118 }
119 }
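/* Worked example for the adjustment above (a sketch, assuming canonicalized
   front-end input): for

     for (i = 0; i != n; i++)

   the NE_EXPR case with step 1 rewrites the condition to i < n, and step -1
   would give i > n; for a pointer iterator p != end with step equal to
   sizeof (*p) the result is p < end.  The LE_EXPR/GE_EXPR cases instead fold
   the bound, e.g. i <= n becomes i < n + 1 and i >= n becomes i > n - 1.  */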
120
121 /* Return the loop step, extracted from the increment expression INCR of a
122 gimple omp for statement. */
123
124 tree
125 omp_get_for_step_from_incr (location_t loc, tree incr)
126 {
127 tree step;
128 switch (TREE_CODE (incr))
129 {
130 case PLUS_EXPR:
131 step = TREE_OPERAND (incr, 1);
132 break;
133 case POINTER_PLUS_EXPR:
134 step = fold_convert (ssizetype, TREE_OPERAND (incr, 1));
135 break;
136 case MINUS_EXPR:
137 step = TREE_OPERAND (incr, 1);
138 step = fold_build1_loc (loc, NEGATE_EXPR, TREE_TYPE (step), step);
139 break;
140 default:
141 gcc_unreachable ();
142 }
143 return step;
144 }
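/* Illustrative inputs (hypothetical): an increment recorded as i = i + 4
   (PLUS_EXPR) yields step 4; i = i - 4 (MINUS_EXPR) yields the negated
   constant -4; p = p + 8 on a pointer (POINTER_PLUS_EXPR) yields 8 converted
   to ssizetype, so pointer and integer loops are handled uniformly later.  */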
145
146 /* Extract the header elements of parallel loop FOR_STMT and store
147 them into *FD. */
148
149 void
150 omp_extract_for_data (gomp_for *for_stmt, struct omp_for_data *fd,
151 struct omp_for_data_loop *loops)
152 {
153 tree t, var, *collapse_iter, *collapse_count;
154 tree count = NULL_TREE, iter_type = long_integer_type_node;
155 struct omp_for_data_loop *loop;
156 int i;
157 struct omp_for_data_loop dummy_loop;
158 location_t loc = gimple_location (for_stmt);
159 bool simd = gimple_omp_for_kind (for_stmt) & GF_OMP_FOR_SIMD;
160 bool distribute = gimple_omp_for_kind (for_stmt)
161 == GF_OMP_FOR_KIND_DISTRIBUTE;
162 bool taskloop = gimple_omp_for_kind (for_stmt)
163 == GF_OMP_FOR_KIND_TASKLOOP;
164 tree iterv, countv;
165
166 fd->for_stmt = for_stmt;
167 fd->pre = NULL;
168 fd->have_nowait = distribute || simd;
169 fd->have_ordered = false;
170 fd->have_reductemp = false;
171 fd->tiling = NULL_TREE;
172 fd->collapse = 1;
173 fd->ordered = 0;
174 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
175 fd->sched_modifiers = 0;
176 fd->chunk_size = NULL_TREE;
177 fd->simd_schedule = false;
178 collapse_iter = NULL;
179 collapse_count = NULL;
180
181 for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
182 switch (OMP_CLAUSE_CODE (t))
183 {
184 case OMP_CLAUSE_NOWAIT:
185 fd->have_nowait = true;
186 break;
187 case OMP_CLAUSE_ORDERED:
188 fd->have_ordered = true;
189 if (OMP_CLAUSE_ORDERED_EXPR (t))
190 fd->ordered = tree_to_shwi (OMP_CLAUSE_ORDERED_EXPR (t));
191 break;
192 case OMP_CLAUSE_SCHEDULE:
193 gcc_assert (!distribute && !taskloop);
194 fd->sched_kind
195 = (enum omp_clause_schedule_kind)
196 (OMP_CLAUSE_SCHEDULE_KIND (t) & OMP_CLAUSE_SCHEDULE_MASK);
197 fd->sched_modifiers = (OMP_CLAUSE_SCHEDULE_KIND (t)
198 & ~OMP_CLAUSE_SCHEDULE_MASK);
199 fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
200 fd->simd_schedule = OMP_CLAUSE_SCHEDULE_SIMD (t);
201 break;
202 case OMP_CLAUSE_DIST_SCHEDULE:
203 gcc_assert (distribute);
204 fd->chunk_size = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (t);
205 break;
206 case OMP_CLAUSE_COLLAPSE:
207 fd->collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (t));
208 if (fd->collapse > 1)
209 {
210 collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
211 collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
212 }
213 break;
214 case OMP_CLAUSE_TILE:
215 fd->tiling = OMP_CLAUSE_TILE_LIST (t);
216 fd->collapse = list_length (fd->tiling);
217 gcc_assert (fd->collapse);
218 collapse_iter = &OMP_CLAUSE_TILE_ITERVAR (t);
219 collapse_count = &OMP_CLAUSE_TILE_COUNT (t);
220 break;
221 case OMP_CLAUSE__REDUCTEMP_:
222 fd->have_reductemp = true;
223 default:
224 break;
225 }
226
227 if (fd->collapse > 1 || fd->tiling)
228 fd->loops = loops;
229 else
230 fd->loops = &fd->loop;
231
232 if (fd->ordered && fd->collapse == 1 && loops != NULL)
233 {
234 fd->loops = loops;
235 iterv = NULL_TREE;
236 countv = NULL_TREE;
237 collapse_iter = &iterv;
238 collapse_count = &countv;
239 }
240
241 /* FIXME: for now map schedule(auto) to schedule(static).
242 There should be analysis to determine whether all iterations
243 are approximately the same amount of work (then schedule(static)
244 is best) or if it varies (then schedule(dynamic,N) is better). */
245 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
246 {
247 fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
248 gcc_assert (fd->chunk_size == NULL);
249 }
250 gcc_assert ((fd->collapse == 1 && !fd->tiling) || collapse_iter != NULL);
251 if (taskloop)
252 fd->sched_kind = OMP_CLAUSE_SCHEDULE_RUNTIME;
253 if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
254 gcc_assert (fd->chunk_size == NULL);
255 else if (fd->chunk_size == NULL)
256 {
257 /* We only need to compute a default chunk size for ordered
258 static loops and dynamic loops. */
259 if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
260 || fd->have_ordered)
261 fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
262 ? integer_zero_node : integer_one_node;
263 }
264
265 int cnt = fd->ordered ? fd->ordered : fd->collapse;
266 for (i = 0; i < cnt; i++)
267 {
268 if (i == 0
269 && fd->collapse == 1
270 && !fd->tiling
271 && (fd->ordered == 0 || loops == NULL))
272 loop = &fd->loop;
273 else if (loops != NULL)
274 loop = loops + i;
275 else
276 loop = &dummy_loop;
277
278 loop->v = gimple_omp_for_index (for_stmt, i);
279 gcc_assert (SSA_VAR_P (loop->v));
280 gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
281 || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
282 var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
283 loop->n1 = gimple_omp_for_initial (for_stmt, i);
284
285 loop->cond_code = gimple_omp_for_cond (for_stmt, i);
286 loop->n2 = gimple_omp_for_final (for_stmt, i);
287 gcc_assert (loop->cond_code != NE_EXPR
288 || (gimple_omp_for_kind (for_stmt)
289 != GF_OMP_FOR_KIND_OACC_LOOP));
290
291 t = gimple_omp_for_incr (for_stmt, i);
292 gcc_assert (TREE_OPERAND (t, 0) == var);
293 loop->step = omp_get_for_step_from_incr (loc, t);
294
295 omp_adjust_for_condition (loc, &loop->cond_code, &loop->n2, loop->v,
296 loop->step);
297
298 if (simd
299 || (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
300 && !fd->have_ordered))
301 {
302 if (fd->collapse == 1 && !fd->tiling)
303 iter_type = TREE_TYPE (loop->v);
304 else if (i == 0
305 || TYPE_PRECISION (iter_type)
306 < TYPE_PRECISION (TREE_TYPE (loop->v)))
307 iter_type
308 = build_nonstandard_integer_type
309 (TYPE_PRECISION (TREE_TYPE (loop->v)), 1);
310 }
311 else if (iter_type != long_long_unsigned_type_node)
312 {
313 if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
314 iter_type = long_long_unsigned_type_node;
315 else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
316 && TYPE_PRECISION (TREE_TYPE (loop->v))
317 >= TYPE_PRECISION (iter_type))
318 {
319 tree n;
320
321 if (loop->cond_code == LT_EXPR)
322 n = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (loop->v),
323 loop->n2, loop->step);
324 else
325 n = loop->n1;
326 if (TREE_CODE (n) != INTEGER_CST
327 || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
328 iter_type = long_long_unsigned_type_node;
329 }
330 else if (TYPE_PRECISION (TREE_TYPE (loop->v))
331 > TYPE_PRECISION (iter_type))
332 {
333 tree n1, n2;
334
335 if (loop->cond_code == LT_EXPR)
336 {
337 n1 = loop->n1;
338 n2 = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (loop->v),
339 loop->n2, loop->step);
340 }
341 else
342 {
343 n1 = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (loop->v),
344 loop->n2, loop->step);
345 n2 = loop->n1;
346 }
347 if (TREE_CODE (n1) != INTEGER_CST
348 || TREE_CODE (n2) != INTEGER_CST
349 || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
350 || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
351 iter_type = long_long_unsigned_type_node;
352 }
353 }
354
355 if (i >= fd->collapse)
356 continue;
357
358 if (collapse_count && *collapse_count == NULL)
359 {
360 t = fold_binary (loop->cond_code, boolean_type_node,
361 fold_convert (TREE_TYPE (loop->v), loop->n1),
362 fold_convert (TREE_TYPE (loop->v), loop->n2));
363 if (t && integer_zerop (t))
364 count = build_zero_cst (long_long_unsigned_type_node);
365 else if ((i == 0 || count != NULL_TREE)
366 && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
367 && TREE_CONSTANT (loop->n1)
368 && TREE_CONSTANT (loop->n2)
369 && TREE_CODE (loop->step) == INTEGER_CST)
370 {
371 tree itype = TREE_TYPE (loop->v);
372
373 if (POINTER_TYPE_P (itype))
374 itype = signed_type_for (itype);
375 t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
376 t = fold_build2_loc (loc, PLUS_EXPR, itype,
377 fold_convert_loc (loc, itype, loop->step),
378 t);
379 t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
380 fold_convert_loc (loc, itype, loop->n2));
381 t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
382 fold_convert_loc (loc, itype, loop->n1));
383 if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
384 {
385 tree step = fold_convert_loc (loc, itype, loop->step);
386 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
387 fold_build1_loc (loc, NEGATE_EXPR,
388 itype, t),
389 fold_build1_loc (loc, NEGATE_EXPR,
390 itype, step));
391 }
392 else
393 t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
394 fold_convert_loc (loc, itype,
395 loop->step));
396 t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
397 if (count != NULL_TREE)
398 count = fold_build2_loc (loc, MULT_EXPR,
399 long_long_unsigned_type_node,
400 count, t);
401 else
402 count = t;
403 if (TREE_CODE (count) != INTEGER_CST)
404 count = NULL_TREE;
405 }
406 else if (count && !integer_zerop (count))
407 count = NULL_TREE;
408 }
409 }
410
411 if (count
412 && !simd
413 && (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
414 || fd->have_ordered))
415 {
416 if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
417 iter_type = long_long_unsigned_type_node;
418 else
419 iter_type = long_integer_type_node;
420 }
421 else if (collapse_iter && *collapse_iter != NULL)
422 iter_type = TREE_TYPE (*collapse_iter);
423 fd->iter_type = iter_type;
424 if (collapse_iter && *collapse_iter == NULL)
425 *collapse_iter = create_tmp_var (iter_type, ".iter");
426 if (collapse_count && *collapse_count == NULL)
427 {
428 if (count)
429 *collapse_count = fold_convert_loc (loc, iter_type, count);
430 else
431 *collapse_count = create_tmp_var (iter_type, ".count");
432 }
433
434 if (fd->collapse > 1 || fd->tiling || (fd->ordered && loops))
435 {
436 fd->loop.v = *collapse_iter;
437 fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
438 fd->loop.n2 = *collapse_count;
439 fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
440 fd->loop.cond_code = LT_EXPR;
441 }
442 else if (loops)
443 loops[0] = fd->loop;
444 }
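/* Worked example of the constant trip-count computation above (a sketch with
   made-up bounds): for i = 0; i < 10; i += 3 the folded expression is
   (step + (cond_code == LT_EXPR ? -1 : 1) + n2 - n1) / step
   = (3 - 1 + 10 - 0) / 3 = 4 iterations; with collapse > 1 the per-loop
   counts are multiplied together.  Anything that does not fold to an
   INTEGER_CST is discarded here and recomputed at run time via the .count
   temporary created above.  */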
445
446 /* Build a call to GOMP_barrier. */
447
448 gimple *
449 omp_build_barrier (tree lhs)
450 {
451 tree fndecl = builtin_decl_explicit (lhs ? BUILT_IN_GOMP_BARRIER_CANCEL
452 : BUILT_IN_GOMP_BARRIER);
453 gcall *g = gimple_build_call (fndecl, 0);
454 if (lhs)
455 gimple_call_set_lhs (g, lhs);
456 return g;
457 }
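/* Usage sketch (hypothetical): lowering code typically emits
     gimple_seq_add_stmt (&seq, omp_build_barrier (NULL_TREE));
   for a plain barrier; passing an lhs selects GOMP_barrier_cancel instead and
   captures its "was the region cancelled" result in LHS.  */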
458
459 /* Return maximum possible vectorization factor for the target. */
460
461 poly_uint64
462 omp_max_vf (void)
463 {
464 if (!optimize
465 || optimize_debug
466 || !flag_tree_loop_optimize
467 || (!flag_tree_loop_vectorize
468 && global_options_set.x_flag_tree_loop_vectorize))
469 return 1;
470
471 auto_vector_sizes sizes;
472 targetm.vectorize.autovectorize_vector_sizes (&sizes);
473 if (!sizes.is_empty ())
474 {
475 poly_uint64 vf = 0;
476 for (unsigned int i = 0; i < sizes.length (); ++i)
477 vf = ordered_max (vf, sizes[i]);
478 return vf;
479 }
480
481 machine_mode vqimode = targetm.vectorize.preferred_simd_mode (QImode);
482 if (GET_MODE_CLASS (vqimode) == MODE_VECTOR_INT)
483 return GET_MODE_NUNITS (vqimode);
484
485 return 1;
486 }
487
488 /* Return maximum SIMT width if offloading may target SIMT hardware. */
489
490 int
491 omp_max_simt_vf (void)
492 {
493 if (!optimize)
494 return 0;
495 if (ENABLE_OFFLOADING)
496 for (const char *c = getenv ("OFFLOAD_TARGET_NAMES"); c;)
497 {
498 if (!strncmp (c, "nvptx", strlen ("nvptx")))
499 return 32;
500 else if ((c = strchr (c, ',')))
501 c++;
502 }
503 return 0;
504 }
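/* Example (assuming a compiler configured for PTX offloading): with
   OFFLOAD_TARGET_NAMES containing "nvptx-none" the prefix match above yields
   32, the warp width assumed for SIMT code; otherwise, or when not
   optimizing, the function returns 0.  */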
505
506 /* Encode an oacc launch argument. This matches the GOMP_LAUNCH_PACK
507 macro in gomp-constants.h. We do not check for overflow. */
508
509 tree
510 oacc_launch_pack (unsigned code, tree device, unsigned op)
511 {
512 tree res;
513
514 res = build_int_cst (unsigned_type_node, GOMP_LAUNCH_PACK (code, 0, op));
515 if (device)
516 {
517 device = fold_build2 (LSHIFT_EXPR, unsigned_type_node,
518 device, build_int_cst (unsigned_type_node,
519 GOMP_LAUNCH_DEVICE_SHIFT));
520 res = fold_build2 (BIT_IOR_EXPR, unsigned_type_node, res, device);
521 }
522 return res;
523 }
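/* Sketch of the encoding (the exact shift values are defined in
   gomp-constants.h): oacc_launch_pack (GOMP_LAUNCH_DIM, NULL_TREE, mask)
   simply builds the constant GOMP_LAUNCH_PACK (GOMP_LAUNCH_DIM, 0, mask);
   when a DEVICE expression is given, it is shifted by
   GOMP_LAUNCH_DEVICE_SHIFT and ORed in so libgomp can decode it at run
   time.  */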
524
525 /* FIXME: What is the following comment for? */
526 /* Look for compute grid dimension clauses and convert to an attribute
527 attached to FN. This permits the target-side code to (a) massage
528 the dimensions, (b) emit that data and (c) optimize. Non-constant
529 dimensions are pushed onto ARGS.
530
531 The attribute value is a TREE_LIST. A set of dimensions is
532 represented as a list of INTEGER_CST. Those that are runtime
533 exprs are represented as an INTEGER_CST of zero.
534
535 TODO: Normally the attribute will just contain a single such list. If
536 however it contains a list of lists, this will represent the use of
537 device_type. Each member of the outer list is an assoc list of
538 dimensions, keyed by the device type. The first entry will be the
539 default. Well, that's the plan. */
540
541 /* Replace any existing oacc fn attribute with updated dimensions. */
542
543 void
544 oacc_replace_fn_attrib (tree fn, tree dims)
545 {
546 tree ident = get_identifier (OACC_FN_ATTRIB);
547 tree attribs = DECL_ATTRIBUTES (fn);
548
549 /* If we happen to be present as the first attrib, drop it. */
550 if (attribs && TREE_PURPOSE (attribs) == ident)
551 attribs = TREE_CHAIN (attribs);
552 DECL_ATTRIBUTES (fn) = tree_cons (ident, dims, attribs);
553 }
554
555 /* Scan CLAUSES for launch dimensions and attach them to the oacc
556 function attribute. Push any that are non-constant onto the ARGS
557 list, along with an appropriate GOMP_LAUNCH_DIM tag. */
558
559 void
560 oacc_set_fn_attrib (tree fn, tree clauses, vec<tree> *args)
561 {
562 /* Must match GOMP_DIM ordering. */
563 static const omp_clause_code ids[]
564 = { OMP_CLAUSE_NUM_GANGS, OMP_CLAUSE_NUM_WORKERS,
565 OMP_CLAUSE_VECTOR_LENGTH };
566 unsigned ix;
567 tree dims[GOMP_DIM_MAX];
568
569 tree attr = NULL_TREE;
570 unsigned non_const = 0;
571
572 for (ix = GOMP_DIM_MAX; ix--;)
573 {
574 tree clause = omp_find_clause (clauses, ids[ix]);
575 tree dim = NULL_TREE;
576
577 if (clause)
578 dim = OMP_CLAUSE_EXPR (clause, ids[ix]);
579 dims[ix] = dim;
580 if (dim && TREE_CODE (dim) != INTEGER_CST)
581 {
582 dim = integer_zero_node;
583 non_const |= GOMP_DIM_MASK (ix);
584 }
585 attr = tree_cons (NULL_TREE, dim, attr);
586 }
587
588 oacc_replace_fn_attrib (fn, attr);
589
590 if (non_const)
591 {
592 /* Push a dynamic argument set. */
593 args->safe_push (oacc_launch_pack (GOMP_LAUNCH_DIM,
594 NULL_TREE, non_const));
595 for (unsigned ix = 0; ix != GOMP_DIM_MAX; ix++)
596 if (non_const & GOMP_DIM_MASK (ix))
597 args->safe_push (dims[ix]);
598 }
599 }
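/* Example (hypothetical clause list): for num_gangs (32) num_workers (w) the
   attribute value becomes the list { 32, 0, NULL_TREE } in gang/worker/vector
   order, with 0 marking the non-constant worker dimension; a GOMP_LAUNCH_DIM
   launch argument carrying the worker bit and the expression w are then
   pushed onto ARGS.  */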
600
601 /* Process the routine's dimension clauses to generate an attribute
602 value. Issue diagnostics as appropriate. We default to SEQ
603 (OpenACC 2.5 clarifies this). All dimensions have a size of zero
604 (dynamic). TREE_PURPOSE is set to indicate whether that dimension
605 can have a loop partitioned on it. Non-zero indicates
606 yes, zero indicates no. By construction once a non-zero has been
607 reached, further inner dimensions must also be non-zero. We set
608 TREE_VALUE to zero for the dimensions that may be partitioned and
609 1 for the other ones -- if a loop is (erroneously) spawned at
610 an outer level, we don't want to try and partition it. */
611
612 tree
613 oacc_build_routine_dims (tree clauses)
614 {
615 /* Must match GOMP_DIM ordering. */
616 static const omp_clause_code ids[]
617 = {OMP_CLAUSE_GANG, OMP_CLAUSE_WORKER, OMP_CLAUSE_VECTOR, OMP_CLAUSE_SEQ};
618 int ix;
619 int level = -1;
620
621 for (; clauses; clauses = OMP_CLAUSE_CHAIN (clauses))
622 for (ix = GOMP_DIM_MAX + 1; ix--;)
623 if (OMP_CLAUSE_CODE (clauses) == ids[ix])
624 {
625 if (level >= 0)
626 error_at (OMP_CLAUSE_LOCATION (clauses),
627 "multiple loop axes specified for routine");
628 level = ix;
629 break;
630 }
631
632 /* Default to SEQ. */
633 if (level < 0)
634 level = GOMP_DIM_MAX;
635
636 tree dims = NULL_TREE;
637
638 for (ix = GOMP_DIM_MAX; ix--;)
639 dims = tree_cons (build_int_cst (boolean_type_node, ix >= level),
640 build_int_cst (integer_type_node, ix < level), dims);
641
642 return dims;
643 }
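/* Worked example (sketch): for "#pragma acc routine worker" the worker clause
   gives level == 1, so in gang/worker/vector order the list is
   (purpose 0, value 1), (purpose 1, value 0), (purpose 1, value 0):
   gang loops may not be partitioned inside the routine, worker and vector
   loops may, and the unused gang dimension is given size 1.  */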
644
645 /* Retrieve the oacc function attrib and return it. Non-oacc
646 functions will return NULL. */
647
648 tree
649 oacc_get_fn_attrib (tree fn)
650 {
651 return lookup_attribute (OACC_FN_ATTRIB, DECL_ATTRIBUTES (fn));
652 }
653
654 /* Return true if FN is an OpenMP or OpenACC offloading function. */
655
656 bool
657 offloading_function_p (tree fn)
658 {
659 tree attrs = DECL_ATTRIBUTES (fn);
660 return (lookup_attribute ("omp declare target", attrs)
661 || lookup_attribute ("omp target entrypoint", attrs));
662 }
663
664 /* Extract an oacc execution dimension from FN. FN must be an
665 offloaded function or routine that has already had its execution
666 dimensions lowered to the target-specific values. */
667
668 int
669 oacc_get_fn_dim_size (tree fn, int axis)
670 {
671 tree attrs = oacc_get_fn_attrib (fn);
672
673 gcc_assert (axis < GOMP_DIM_MAX);
674
675 tree dims = TREE_VALUE (attrs);
676 while (axis--)
677 dims = TREE_CHAIN (dims);
678
679 int size = TREE_INT_CST_LOW (TREE_VALUE (dims));
680
681 return size;
682 }
683
684 /* Extract the dimension axis from an IFN_GOACC_DIM_POS or
685 IFN_GOACC_DIM_SIZE call. */
686
687 int
688 oacc_get_ifn_dim_arg (const gimple *stmt)
689 {
690 gcc_checking_assert (gimple_call_internal_fn (stmt) == IFN_GOACC_DIM_SIZE
691 || gimple_call_internal_fn (stmt) == IFN_GOACC_DIM_POS);
692 tree arg = gimple_call_arg (stmt, 0);
693 HOST_WIDE_INT axis = TREE_INT_CST_LOW (arg);
694
695 gcc_checking_assert (axis >= 0 && axis < GOMP_DIM_MAX);
696 return (int) axis;
697 }
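/* Example (sketch): an internal call IFN_GOACC_DIM_POS with constant argument
   2 refers to the vector axis, so this helper returns 2, and
   oacc_get_fn_dim_size (fn, 2) would give the vector length that the
   offloaded function FN was compiled with.  */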