1 /* Array translation routines
2 Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
3 2011, 2012
4 Free Software Foundation, Inc.
5 Contributed by Paul Brook <paul@nowt.org>
6 and Steven Bosscher <s.bosscher@student.tudelft.nl>
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /* trans-array.c-- Various array related code, including scalarization,
25 allocation, initialization and other support routines. */
26
27 /* How the scalarizer works.
28 In gfortran, array expressions use the same core routines as scalar
29 expressions.
30 First, a Scalarization State (SS) chain is built. This is done by walking
31 the expression tree, and building a linear list of the terms in the
32 expression. As the tree is walked, scalar subexpressions are translated.
33
34 The scalarization parameters are stored in a gfc_loopinfo structure.
35 First the start and stride of each term are calculated by
36 gfc_conv_ss_startstride. During this process the expressions for the array
37 descriptors and data pointers are also translated.
38
39 If the expression is an assignment, we must then resolve any dependencies.
40 In Fortran all the RHS values of an assignment must be evaluated before
41 any assignments take place. This can require a temporary array to store the
42 values. We also require a temporary when we are passing array expressions
43 or vector subscripts as procedure parameters.
44
45 Array sections are passed without being copied to a temporary. These use the
46 scalarizer to determine the shape of the section. The flag
47 loop->array_parameter tells the scalarizer that the actual values and loop
48 variables will not be required.
49
50 The function gfc_conv_loop_setup generates the scalarization setup code.
51 It determines the range of the scalarizing loop variables. If a temporary
52 is required, this is created and initialized. Code for scalar expressions
53 taken outside the loop is also generated at this time. Next the offset and
54 scaling required to translate from loop variables to array indices for each
55 term is calculated.
56
57 A call to gfc_start_scalarized_body marks the start of the scalarized
58 expression. This creates a scope and declares the loop variables. Before
59 calling this, gfc_mark_ss_chain_used must be used to indicate which terms
60 will be used inside this loop.
61
62 The scalar gfc_conv_* functions are then used to build the main body of the
63 scalarization loop. Scalarization loop variables and precalculated scalar
64 values are automatically substituted. Note that gfc_advance_se_ss_chain
65 must be used, rather than changing the se->ss directly.
66
67 For assignment expressions requiring a temporary, two sub-loops are
68 generated. The first stores the result of the expression in the temporary,
69 the second copies it to the result. A call to
70 gfc_trans_scalarized_loop_boundary marks the end of the main loop code and
71 the start of the copying loop. The temporary may be less than full rank.
72
73 Finally gfc_trans_scalarizing_loops is called to generate the implicit do
74 loops. The loops are added to the pre chain of the loopinfo. The post
75 chain may still contain cleanup code.
76
77 After the loop code has been added into its parent scope gfc_cleanup_loop
78 is called to free all the SS allocated by the scalarizer. */
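/* A minimal illustrative sketch (kept out of the build with #if 0) of the
   call sequence described above, modelled on
   gfc_trans_array_constructor_subarray later in this file.  It scalarizes
   an arbitrary array expression EXPR and discards the element values; the
   function name and its two parameters are placeholders, not part of the
   gfortran API.  */
#if 0
static void
example_scalarize (stmtblock_t *pblock, gfc_expr *expr)
{
  gfc_se se;
  gfc_ss *ss;
  gfc_loopinfo loop;
  stmtblock_t body;

  gfc_init_se (&se, NULL);

  /* Walk the expression to build the SS chain.  */
  ss = gfc_walk_expr (expr);
  gcc_assert (ss != gfc_ss_terminator);

  /* Associate the chain with a loop, then calculate the bounds and set up
     the scalarization loop.  */
  gfc_init_loopinfo (&loop);
  gfc_add_ss_to_loop (&loop, ss);
  gfc_conv_ss_startstride (&loop);
  gfc_conv_loop_setup (&loop, &expr->where);

  /* Mark the chain as used in the main loop body and generate that body;
     the gfc_conv_* routines substitute the loop variables automatically.  */
  gfc_mark_ss_chain_used (ss, 1);
  gfc_start_scalarized_body (&loop, &body);
  gfc_copy_loopinfo_to_se (&se, &loop);
  se.ss = ss;
  gfc_conv_expr (&se, expr);
  gfc_add_block_to_block (&body, &se.pre);
  gfc_add_block_to_block (&body, &se.post);

  /* Generate the implicit do loops, emit them and free the SS chain.  */
  gfc_trans_scalarizing_loops (&loop, &body);
  gfc_add_block_to_block (&loop.pre, &loop.post);
  gfc_add_expr_to_block (pblock, gfc_finish_block (&loop.pre));
  gfc_cleanup_loop (&loop);
}
#endif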
79
80 #include "config.h"
81 #include "system.h"
82 #include "coretypes.h"
83 #include "tree.h"
84 #include "gimple.h"
85 #include "diagnostic-core.h" /* For internal_error/fatal_error. */
86 #include "flags.h"
87 #include "gfortran.h"
88 #include "constructor.h"
89 #include "trans.h"
90 #include "trans-stmt.h"
91 #include "trans-types.h"
92 #include "trans-array.h"
93 #include "trans-const.h"
94 #include "dependency.h"
95
96 static bool gfc_get_array_constructor_size (mpz_t *, gfc_constructor_base);
97
98 /* The contents of this structure aren't actually used, just the address. */
99 static gfc_ss gfc_ss_terminator_var;
100 gfc_ss * const gfc_ss_terminator = &gfc_ss_terminator_var;
101
102
103 static tree
104 gfc_array_dataptr_type (tree desc)
105 {
106 return (GFC_TYPE_ARRAY_DATAPTR_TYPE (TREE_TYPE (desc)));
107 }
108
109
110 /* Build expressions to access the members of an array descriptor.
111 It's surprisingly easy to mess up here, so never access
112 an array descriptor by "brute force", always use these
113 functions. This also avoids problems if we change the format
114 of an array descriptor.
115
116 To understand these magic numbers, look at the comments
117 before gfc_build_array_type() in trans-types.c.
118
119 The code within these defines should be the only code which knows the format
120 of an array descriptor.
121
122 Any code just needing to obtain the bounds of an array should use
123 gfc_conv_array_* rather than the following functions, as these will return
124 known constant values and work with arrays which do not have descriptors.
125
126 Don't forget to #undef these! */
127
128 #define DATA_FIELD 0
129 #define OFFSET_FIELD 1
130 #define DTYPE_FIELD 2
131 #define DIMENSION_FIELD 3
132 #define CAF_TOKEN_FIELD 4
133
134 #define STRIDE_SUBFIELD 0
135 #define LBOUND_SUBFIELD 1
136 #define UBOUND_SUBFIELD 2
137
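/* A minimal illustrative sketch (kept out of the build with #if 0) of how
   descriptor fields are meant to be read through the accessors defined
   below, rather than by building COMPONENT_REFs by hand.  The function
   name and its parameters are placeholders; compare
   gfc_conv_shift_descriptor_lbound further down for a real read/write
   example.  */
#if 0
static tree
example_descriptor_extent (stmtblock_t *block, tree desc, int dim)
{
  tree lbound, ubound, extent;

  lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]);
  ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]);

  /* extent = ubound - lbound + 1.  */
  extent = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
			    ubound, lbound);
  extent = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
			    extent, gfc_index_one_node);
  return gfc_evaluate_now (extent, block);
}
#endif
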
138 /* This provides READ-ONLY access to the data field. The field itself
139 doesn't have the proper type. */
140
141 tree
142 gfc_conv_descriptor_data_get (tree desc)
143 {
144 tree field, type, t;
145
146 type = TREE_TYPE (desc);
147 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
148
149 field = TYPE_FIELDS (type);
150 gcc_assert (DATA_FIELD == 0);
151
152 t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
153 field, NULL_TREE);
154 t = fold_convert (GFC_TYPE_ARRAY_DATAPTR_TYPE (type), t);
155
156 return t;
157 }
158
159 /* This provides WRITE access to the data field. */
166
167 void
168 gfc_conv_descriptor_data_set (stmtblock_t *block, tree desc, tree value)
169 {
170 tree field, type, t;
171
172 type = TREE_TYPE (desc);
173 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
174
175 field = TYPE_FIELDS (type);
176 gcc_assert (DATA_FIELD == 0);
177
178 t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
179 field, NULL_TREE);
180 gfc_add_modify (block, t, fold_convert (TREE_TYPE (field), value));
181 }
182
183
184 /* This provides address access to the data field. This should only be
185 used by array allocation, passing this on to the runtime. */
186
187 tree
188 gfc_conv_descriptor_data_addr (tree desc)
189 {
190 tree field, type, t;
191
192 type = TREE_TYPE (desc);
193 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
194
195 field = TYPE_FIELDS (type);
196 gcc_assert (DATA_FIELD == 0);
197
198 t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
199 field, NULL_TREE);
200 return gfc_build_addr_expr (NULL_TREE, t);
201 }
202
203 static tree
204 gfc_conv_descriptor_offset (tree desc)
205 {
206 tree type;
207 tree field;
208
209 type = TREE_TYPE (desc);
210 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
211
212 field = gfc_advance_chain (TYPE_FIELDS (type), OFFSET_FIELD);
213 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
214
215 return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
216 desc, field, NULL_TREE);
217 }
218
219 tree
220 gfc_conv_descriptor_offset_get (tree desc)
221 {
222 return gfc_conv_descriptor_offset (desc);
223 }
224
225 void
226 gfc_conv_descriptor_offset_set (stmtblock_t *block, tree desc,
227 tree value)
228 {
229 tree t = gfc_conv_descriptor_offset (desc);
230 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
231 }
232
233
234 tree
235 gfc_conv_descriptor_dtype (tree desc)
236 {
237 tree field;
238 tree type;
239
240 type = TREE_TYPE (desc);
241 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
242
243 field = gfc_advance_chain (TYPE_FIELDS (type), DTYPE_FIELD);
244 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
245
246 return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
247 desc, field, NULL_TREE);
248 }
249
250 static tree
251 gfc_conv_descriptor_dimension (tree desc, tree dim)
252 {
253 tree field;
254 tree type;
255 tree tmp;
256
257 type = TREE_TYPE (desc);
258 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
259
260 field = gfc_advance_chain (TYPE_FIELDS (type), DIMENSION_FIELD);
261 gcc_assert (field != NULL_TREE
262 && TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE
263 && TREE_CODE (TREE_TYPE (TREE_TYPE (field))) == RECORD_TYPE);
264
265 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
266 desc, field, NULL_TREE);
267 tmp = gfc_build_array_ref (tmp, dim, NULL);
268 return tmp;
269 }
270
271
272 tree
273 gfc_conv_descriptor_token (tree desc)
274 {
275 tree type;
276 tree field;
277
278 type = TREE_TYPE (desc);
279 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
280 gcc_assert (GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ALLOCATABLE);
281 gcc_assert (gfc_option.coarray == GFC_FCOARRAY_LIB);
282 field = gfc_advance_chain (TYPE_FIELDS (type), CAF_TOKEN_FIELD);
283 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == prvoid_type_node);
284
285 return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
286 desc, field, NULL_TREE);
287 }
288
289
290 static tree
291 gfc_conv_descriptor_stride (tree desc, tree dim)
292 {
293 tree tmp;
294 tree field;
295
296 tmp = gfc_conv_descriptor_dimension (desc, dim);
297 field = TYPE_FIELDS (TREE_TYPE (tmp));
298 field = gfc_advance_chain (field, STRIDE_SUBFIELD);
299 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
300
301 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
302 tmp, field, NULL_TREE);
303 return tmp;
304 }
305
306 tree
307 gfc_conv_descriptor_stride_get (tree desc, tree dim)
308 {
309 tree type = TREE_TYPE (desc);
310 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
311 if (integer_zerop (dim)
312 && (GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ALLOCATABLE
313 || GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ASSUMED_SHAPE_CONT
314 || GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_POINTER_CONT))
315 return gfc_index_one_node;
316
317 return gfc_conv_descriptor_stride (desc, dim);
318 }
319
320 void
321 gfc_conv_descriptor_stride_set (stmtblock_t *block, tree desc,
322 tree dim, tree value)
323 {
324 tree t = gfc_conv_descriptor_stride (desc, dim);
325 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
326 }
327
328 static tree
329 gfc_conv_descriptor_lbound (tree desc, tree dim)
330 {
331 tree tmp;
332 tree field;
333
334 tmp = gfc_conv_descriptor_dimension (desc, dim);
335 field = TYPE_FIELDS (TREE_TYPE (tmp));
336 field = gfc_advance_chain (field, LBOUND_SUBFIELD);
337 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
338
339 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
340 tmp, field, NULL_TREE);
341 return tmp;
342 }
343
344 tree
345 gfc_conv_descriptor_lbound_get (tree desc, tree dim)
346 {
347 return gfc_conv_descriptor_lbound (desc, dim);
348 }
349
350 void
351 gfc_conv_descriptor_lbound_set (stmtblock_t *block, tree desc,
352 tree dim, tree value)
353 {
354 tree t = gfc_conv_descriptor_lbound (desc, dim);
355 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
356 }
357
358 static tree
359 gfc_conv_descriptor_ubound (tree desc, tree dim)
360 {
361 tree tmp;
362 tree field;
363
364 tmp = gfc_conv_descriptor_dimension (desc, dim);
365 field = TYPE_FIELDS (TREE_TYPE (tmp));
366 field = gfc_advance_chain (field, UBOUND_SUBFIELD);
367 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
368
369 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
370 tmp, field, NULL_TREE);
371 return tmp;
372 }
373
374 tree
375 gfc_conv_descriptor_ubound_get (tree desc, tree dim)
376 {
377 return gfc_conv_descriptor_ubound (desc, dim);
378 }
379
380 void
381 gfc_conv_descriptor_ubound_set (stmtblock_t *block, tree desc,
382 tree dim, tree value)
383 {
384 tree t = gfc_conv_descriptor_ubound (desc, dim);
385 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
386 }
387
388 /* Build a null array descriptor constructor. */
389
390 tree
391 gfc_build_null_descriptor (tree type)
392 {
393 tree field;
394 tree tmp;
395
396 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
397 gcc_assert (DATA_FIELD == 0);
398 field = TYPE_FIELDS (type);
399
400 /* Set a NULL data pointer. */
401 tmp = build_constructor_single (type, field, null_pointer_node);
402 TREE_CONSTANT (tmp) = 1;
403 /* All other fields are ignored. */
404
405 return tmp;
406 }
407
408
409 /* Modify a descriptor such that the lbound of a given dimension is the value
410 specified. This also updates ubound and offset accordingly. */
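/* As a concrete check of the arithmetic (hypothetical values): for a
   one-dimensional descriptor with lbound = 1, ubound = 5, stride = 1 and
   offset = -1, shifting the lbound to 0 gives diff = -1, so the ubound
   becomes 4, the offset becomes -1 - (-1 * 1) = 0 and the lbound becomes 0;
   element (i) is still found at data[offset + i * stride].  */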
411
412 void
413 gfc_conv_shift_descriptor_lbound (stmtblock_t* block, tree desc,
414 int dim, tree new_lbound)
415 {
416 tree offs, ubound, lbound, stride;
417 tree diff, offs_diff;
418
419 new_lbound = fold_convert (gfc_array_index_type, new_lbound);
420
421 offs = gfc_conv_descriptor_offset_get (desc);
422 lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]);
423 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]);
424 stride = gfc_conv_descriptor_stride_get (desc, gfc_rank_cst[dim]);
425
426 /* Get difference (new - old) by which to shift stuff. */
427 diff = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
428 new_lbound, lbound);
429
430 /* Shift ubound and offset accordingly. This has to be done before
431 updating the lbound, as they depend on the lbound expression! */
432 ubound = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
433 ubound, diff);
434 gfc_conv_descriptor_ubound_set (block, desc, gfc_rank_cst[dim], ubound);
435 offs_diff = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
436 diff, stride);
437 offs = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
438 offs, offs_diff);
439 gfc_conv_descriptor_offset_set (block, desc, offs);
440
441 /* Finally set lbound to value we want. */
442 gfc_conv_descriptor_lbound_set (block, desc, gfc_rank_cst[dim], new_lbound);
443 }
444
445
446 /* Cleanup those #defines. */
447
448 #undef DATA_FIELD
449 #undef OFFSET_FIELD
450 #undef DTYPE_FIELD
451 #undef DIMENSION_FIELD
452 #undef CAF_TOKEN_FIELD
453 #undef STRIDE_SUBFIELD
454 #undef LBOUND_SUBFIELD
455 #undef UBOUND_SUBFIELD
456
457
458 /* Mark a SS chain as used. Flags specifies in which loops the SS is used.
459 flags & 1 = Main loop body.
460 flags & 2 = temp copy loop. */
461
462 void
463 gfc_mark_ss_chain_used (gfc_ss * ss, unsigned flags)
464 {
465 for (; ss != gfc_ss_terminator; ss = ss->next)
466 ss->info->useflags = flags;
467 }
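/* For instance, gfc_trans_array_constructor_subarray below calls
   gfc_mark_ss_chain_used (ss, 1) because its chain is only needed in the
   main loop body; a chain needed in both the main loop and the temporary
   copying loop would have both bits set, i.e. flags == 3.  */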
468
469
470 /* Free a gfc_ss chain. */
471
472 void
473 gfc_free_ss_chain (gfc_ss * ss)
474 {
475 gfc_ss *next;
476
477 while (ss != gfc_ss_terminator)
478 {
479 gcc_assert (ss != NULL);
480 next = ss->next;
481 gfc_free_ss (ss);
482 ss = next;
483 }
484 }
485
486
487 static void
488 free_ss_info (gfc_ss_info *ss_info)
489 {
490 ss_info->refcount--;
491 if (ss_info->refcount > 0)
492 return;
493
494 gcc_assert (ss_info->refcount == 0);
495 free (ss_info);
496 }
497
498
499 /* Free a SS. */
500
501 void
502 gfc_free_ss (gfc_ss * ss)
503 {
504 gfc_ss_info *ss_info;
505 int n;
506
507 ss_info = ss->info;
508
509 switch (ss_info->type)
510 {
511 case GFC_SS_SECTION:
512 for (n = 0; n < ss->dimen; n++)
513 {
514 if (ss_info->data.array.subscript[ss->dim[n]])
515 gfc_free_ss_chain (ss_info->data.array.subscript[ss->dim[n]]);
516 }
517 break;
518
519 default:
520 break;
521 }
522
523 free_ss_info (ss_info);
524 free (ss);
525 }
526
527
528 /* Creates and initializes an array type gfc_ss struct. */
529
530 gfc_ss *
531 gfc_get_array_ss (gfc_ss *next, gfc_expr *expr, int dimen, gfc_ss_type type)
532 {
533 gfc_ss *ss;
534 gfc_ss_info *ss_info;
535 int i;
536
537 ss_info = gfc_get_ss_info ();
538 ss_info->refcount++;
539 ss_info->type = type;
540 ss_info->expr = expr;
541
542 ss = gfc_get_ss ();
543 ss->info = ss_info;
544 ss->next = next;
545 ss->dimen = dimen;
546 for (i = 0; i < ss->dimen; i++)
547 ss->dim[i] = i;
548
549 return ss;
550 }
551
552
553 /* Creates and initializes a temporary type gfc_ss struct. */
554
555 gfc_ss *
556 gfc_get_temp_ss (tree type, tree string_length, int dimen)
557 {
558 gfc_ss *ss;
559 gfc_ss_info *ss_info;
560 int i;
561
562 ss_info = gfc_get_ss_info ();
563 ss_info->refcount++;
564 ss_info->type = GFC_SS_TEMP;
565 ss_info->string_length = string_length;
566 ss_info->data.temp.type = type;
567
568 ss = gfc_get_ss ();
569 ss->info = ss_info;
570 ss->next = gfc_ss_terminator;
571 ss->dimen = dimen;
572 for (i = 0; i < ss->dimen; i++)
573 ss->dim[i] = i;
574
575 return ss;
576 }
577
578
579 /* Creates and initializes a scalar type gfc_ss struct. */
580
581 gfc_ss *
582 gfc_get_scalar_ss (gfc_ss *next, gfc_expr *expr)
583 {
584 gfc_ss *ss;
585 gfc_ss_info *ss_info;
586
587 ss_info = gfc_get_ss_info ();
588 ss_info->refcount++;
589 ss_info->type = GFC_SS_SCALAR;
590 ss_info->expr = expr;
591
592 ss = gfc_get_ss ();
593 ss->info = ss_info;
594 ss->next = next;
595
596 return ss;
597 }
598
599
600 /* Free all the SS associated with a loop. */
601
602 void
603 gfc_cleanup_loop (gfc_loopinfo * loop)
604 {
605 gfc_loopinfo *loop_next, **ploop;
606 gfc_ss *ss;
607 gfc_ss *next;
608
609 ss = loop->ss;
610 while (ss != gfc_ss_terminator)
611 {
612 gcc_assert (ss != NULL);
613 next = ss->loop_chain;
614 gfc_free_ss (ss);
615 ss = next;
616 }
617
618 /* Remove reference to self in the parent loop. */
619 if (loop->parent)
620 for (ploop = &loop->parent->nested; *ploop; ploop = &(*ploop)->next)
621 if (*ploop == loop)
622 {
623 *ploop = loop->next;
624 break;
625 }
626
627 /* Free non-freed nested loops. */
628 for (loop = loop->nested; loop; loop = loop_next)
629 {
630 loop_next = loop->next;
631 gfc_cleanup_loop (loop);
632 free (loop);
633 }
634 }
635
636
637 static void
638 set_ss_loop (gfc_ss *ss, gfc_loopinfo *loop)
639 {
640 int n;
641
642 for (; ss != gfc_ss_terminator; ss = ss->next)
643 {
644 ss->loop = loop;
645
646 if (ss->info->type == GFC_SS_SCALAR
647 || ss->info->type == GFC_SS_REFERENCE
648 || ss->info->type == GFC_SS_TEMP)
649 continue;
650
651 for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
652 if (ss->info->data.array.subscript[n] != NULL)
653 set_ss_loop (ss->info->data.array.subscript[n], loop);
654 }
655 }
656
657
658 /* Associate a SS chain with a loop. */
659
660 void
661 gfc_add_ss_to_loop (gfc_loopinfo * loop, gfc_ss * head)
662 {
663 gfc_ss *ss;
664 gfc_loopinfo *nested_loop;
665
666 if (head == gfc_ss_terminator)
667 return;
668
669 set_ss_loop (head, loop);
670
671 ss = head;
672 for (; ss && ss != gfc_ss_terminator; ss = ss->next)
673 {
674 if (ss->nested_ss)
675 {
676 nested_loop = ss->nested_ss->loop;
677
678 /* More than one ss can belong to the same loop. Hence, we add the
679 loop to the chain only if it is different from the previously
680 added one, to avoid duplicate nested loops. */
681 if (nested_loop != loop->nested)
682 {
683 gcc_assert (nested_loop->parent == NULL);
684 nested_loop->parent = loop;
685
686 gcc_assert (nested_loop->next == NULL);
687 nested_loop->next = loop->nested;
688 loop->nested = nested_loop;
689 }
690 else
691 gcc_assert (nested_loop->parent == loop);
692 }
693
694 if (ss->next == gfc_ss_terminator)
695 ss->loop_chain = loop->ss;
696 else
697 ss->loop_chain = ss->next;
698 }
699 gcc_assert (ss == gfc_ss_terminator);
700 loop->ss = head;
701 }
702
703
704 /* Generate an initializer for a static pointer or allocatable array. */
705
706 void
707 gfc_trans_static_array_pointer (gfc_symbol * sym)
708 {
709 tree type;
710
711 gcc_assert (TREE_STATIC (sym->backend_decl));
712 /* Just zero the data member. */
713 type = TREE_TYPE (sym->backend_decl);
714 DECL_INITIAL (sym->backend_decl) = gfc_build_null_descriptor (type);
715 }
716
717
718 /* If the bounds of SE's loop have not yet been set, see if they can be
719 determined from array spec AS, which is the array spec of a called
720 function. MAPPING maps the callee's dummy arguments to the values
721 that the caller is passing. Add any initialization and finalization
722 code to SE. */
723
724 void
725 gfc_set_loop_bounds_from_array_spec (gfc_interface_mapping * mapping,
726 gfc_se * se, gfc_array_spec * as)
727 {
728 int n, dim, total_dim;
729 gfc_se tmpse;
730 gfc_ss *ss;
731 tree lower;
732 tree upper;
733 tree tmp;
734
735 total_dim = 0;
736
737 if (!as || as->type != AS_EXPLICIT)
738 return;
739
740 for (ss = se->ss; ss; ss = ss->parent)
741 {
742 total_dim += ss->loop->dimen;
743 for (n = 0; n < ss->loop->dimen; n++)
744 {
745 /* The bound is known, nothing to do. */
746 if (ss->loop->to[n] != NULL_TREE)
747 continue;
748
749 dim = ss->dim[n];
750 gcc_assert (dim < as->rank);
751 gcc_assert (ss->loop->dimen <= as->rank);
752
753 /* Evaluate the lower bound. */
754 gfc_init_se (&tmpse, NULL);
755 gfc_apply_interface_mapping (mapping, &tmpse, as->lower[dim]);
756 gfc_add_block_to_block (&se->pre, &tmpse.pre);
757 gfc_add_block_to_block (&se->post, &tmpse.post);
758 lower = fold_convert (gfc_array_index_type, tmpse.expr);
759
760 /* ...and the upper bound. */
761 gfc_init_se (&tmpse, NULL);
762 gfc_apply_interface_mapping (mapping, &tmpse, as->upper[dim]);
763 gfc_add_block_to_block (&se->pre, &tmpse.pre);
764 gfc_add_block_to_block (&se->post, &tmpse.post);
765 upper = fold_convert (gfc_array_index_type, tmpse.expr);
766
767 /* Set the upper bound of the loop to UPPER - LOWER. */
768 tmp = fold_build2_loc (input_location, MINUS_EXPR,
769 gfc_array_index_type, upper, lower);
770 tmp = gfc_evaluate_now (tmp, &se->pre);
771 ss->loop->to[n] = tmp;
772 }
773 }
774
775 gcc_assert (total_dim == as->rank);
776 }
777
778
779 /* Generate code to allocate an array temporary, or create a variable to
780 hold the data. If size is NULL, zero the descriptor so that the
781 callee will allocate the array. If DEALLOC is true, also generate code to
782 free the array afterwards.
783
784 If INITIAL is not NULL, it is packed using internal_pack and the result used
785 as data instead of allocating a fresh, uninitialized area of memory.
786
787 Initialization code is added to PRE and finalization code to POST.
788 DYNAMIC is true if the caller may want to extend the array later
789 using realloc. This prevents us from putting the array on the stack. */
790
791 static void
792 gfc_trans_allocate_array_storage (stmtblock_t * pre, stmtblock_t * post,
793 gfc_array_info * info, tree size, tree nelem,
794 tree initial, bool dynamic, bool dealloc)
795 {
796 tree tmp;
797 tree desc;
798 bool onstack;
799
800 desc = info->descriptor;
801 info->offset = gfc_index_zero_node;
802 if (size == NULL_TREE || integer_zerop (size))
803 {
804 /* A callee allocated array. */
805 gfc_conv_descriptor_data_set (pre, desc, null_pointer_node);
806 onstack = FALSE;
807 }
808 else
809 {
810 /* Allocate the temporary. */
811 onstack = !dynamic && initial == NULL_TREE
812 && (gfc_option.flag_stack_arrays
813 || gfc_can_put_var_on_stack (size));
814
815 if (onstack)
816 {
817 /* Make a temporary variable to hold the data. */
818 tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (nelem),
819 nelem, gfc_index_one_node);
820 tmp = gfc_evaluate_now (tmp, pre);
821 tmp = build_range_type (gfc_array_index_type, gfc_index_zero_node,
822 tmp);
823 tmp = build_array_type (gfc_get_element_type (TREE_TYPE (desc)),
824 tmp);
825 tmp = gfc_create_var (tmp, "A");
826 /* If we're here only because of -fstack-arrays we have to
827 emit a DECL_EXPR to make the gimplifier emit alloca calls. */
828 if (!gfc_can_put_var_on_stack (size))
829 gfc_add_expr_to_block (pre,
830 fold_build1_loc (input_location,
831 DECL_EXPR, TREE_TYPE (tmp),
832 tmp));
833 tmp = gfc_build_addr_expr (NULL_TREE, tmp);
834 gfc_conv_descriptor_data_set (pre, desc, tmp);
835 }
836 else
837 {
838 /* Allocate memory to hold the data or call internal_pack. */
839 if (initial == NULL_TREE)
840 {
841 tmp = gfc_call_malloc (pre, NULL, size);
842 tmp = gfc_evaluate_now (tmp, pre);
843 }
844 else
845 {
846 tree packed;
847 tree source_data;
848 tree was_packed;
849 stmtblock_t do_copying;
850
851 tmp = TREE_TYPE (initial); /* Pointer to descriptor. */
852 gcc_assert (TREE_CODE (tmp) == POINTER_TYPE);
853 tmp = TREE_TYPE (tmp); /* The descriptor itself. */
854 tmp = gfc_get_element_type (tmp);
855 gcc_assert (tmp == gfc_get_element_type (TREE_TYPE (desc)));
856 packed = gfc_create_var (build_pointer_type (tmp), "data");
857
858 tmp = build_call_expr_loc (input_location,
859 gfor_fndecl_in_pack, 1, initial);
860 tmp = fold_convert (TREE_TYPE (packed), tmp);
861 gfc_add_modify (pre, packed, tmp);
862
863 tmp = build_fold_indirect_ref_loc (input_location,
864 initial);
865 source_data = gfc_conv_descriptor_data_get (tmp);
866
867 /* internal_pack may return source->data without any allocation
868 or copying if it is already packed. If that's the case, we
869 need to allocate and copy manually. */
870
871 gfc_start_block (&do_copying);
872 tmp = gfc_call_malloc (&do_copying, NULL, size);
873 tmp = fold_convert (TREE_TYPE (packed), tmp);
874 gfc_add_modify (&do_copying, packed, tmp);
875 tmp = gfc_build_memcpy_call (packed, source_data, size);
876 gfc_add_expr_to_block (&do_copying, tmp);
877
878 was_packed = fold_build2_loc (input_location, EQ_EXPR,
879 boolean_type_node, packed,
880 source_data);
881 tmp = gfc_finish_block (&do_copying);
882 tmp = build3_v (COND_EXPR, was_packed, tmp,
883 build_empty_stmt (input_location));
884 gfc_add_expr_to_block (pre, tmp);
885
886 tmp = fold_convert (pvoid_type_node, packed);
887 }
888
889 gfc_conv_descriptor_data_set (pre, desc, tmp);
890 }
891 }
892 info->data = gfc_conv_descriptor_data_get (desc);
893
894 /* The offset is zero because we create temporaries with a zero
895 lower bound. */
896 gfc_conv_descriptor_offset_set (pre, desc, gfc_index_zero_node);
897
898 if (dealloc && !onstack)
899 {
900 /* Free the temporary. */
901 tmp = gfc_conv_descriptor_data_get (desc);
902 tmp = gfc_call_free (fold_convert (pvoid_type_node, tmp));
903 gfc_add_expr_to_block (post, tmp);
904 }
905 }
906
907
908 /* Get the scalarizer array dimension corresponding to actual array dimension
909 given by ARRAY_DIM.
910
911 For example, if SS represents the array ref a(1,:,:,1), it is a
912 bidimensional scalarizer array, and the result would be 0 for ARRAY_DIM=1,
913 and 1 for ARRAY_DIM=2.
914 If SS represents transpose(a(:,1,1,:)), it is again a bidimensional
915 scalarizer array, and the result would be 1 for ARRAY_DIM=0 and 0 for
916 ARRAY_DIM=3.
917 If SS represents sum(a(:,:,:,1), dim=1), it is a 2+1-dimensional scalarizer
918 array. If called on the inner ss, the result would be respectively 0,1,2 for
919 ARRAY_DIM=0,1,2. If called on the outer ss, the result would be 0,1
920 for ARRAY_DIM=1,2. */
921
922 static int
923 get_scalarizer_dim_for_array_dim (gfc_ss *ss, int array_dim)
924 {
925 int array_ref_dim;
926 int n;
927
928 array_ref_dim = 0;
929
930 for (; ss; ss = ss->parent)
931 for (n = 0; n < ss->dimen; n++)
932 if (ss->dim[n] < array_dim)
933 array_ref_dim++;
934
935 return array_ref_dim;
936 }
937
938
939 static gfc_ss *
940 innermost_ss (gfc_ss *ss)
941 {
942 while (ss->nested_ss != NULL)
943 ss = ss->nested_ss;
944
945 return ss;
946 }
947
948
949
950 /* Get the array reference dimension corresponding to the given loop dimension.
951 It is different from the true array dimension given by the dim array in
952 the case of a partial array reference (e.g. a(:,:,1,:)).
953 It is different from the loop dimension in the case of a transposed array.
954 */
955
956 static int
957 get_array_ref_dim_for_loop_dim (gfc_ss *ss, int loop_dim)
958 {
959 return get_scalarizer_dim_for_array_dim (innermost_ss (ss),
960 ss->dim[loop_dim]);
961 }
962
963
964 /* Generate code to create and initialize the descriptor for a temporary
965 array. This is used for both temporaries needed by the scalarizer, and
966 functions returning arrays. Adjusts the loop variables to be
967 zero-based, and calculates the loop bounds for callee allocated arrays.
968 Allocate the array unless it's callee allocated (we have a callee
969 allocated array if 'callee_alloc' is true, or if loop->to[n] is
970 NULL_TREE for any n). Also fills in the descriptor, data and offset
971 fields of info if known. Returns the size of the array, or NULL for a
972 callee allocated array.
973
974 'eltype' == NULL signals that the temporary should be a class object.
975 The 'initial' expression is used to obtain the size of the dynamic
976 type; otherwise the allocation and initialization proceed as for any
977 other expression.
978
979 PRE, POST, INITIAL, DYNAMIC and DEALLOC are as for
980 gfc_trans_allocate_array_storage. */
981
982 tree
983 gfc_trans_create_temp_array (stmtblock_t * pre, stmtblock_t * post, gfc_ss * ss,
984 tree eltype, tree initial, bool dynamic,
985 bool dealloc, bool callee_alloc, locus * where)
986 {
987 gfc_loopinfo *loop;
988 gfc_ss *s;
989 gfc_array_info *info;
990 tree from[GFC_MAX_DIMENSIONS], to[GFC_MAX_DIMENSIONS];
991 tree type;
992 tree desc;
993 tree tmp;
994 tree size;
995 tree nelem;
996 tree cond;
997 tree or_expr;
998 tree class_expr = NULL_TREE;
999 int n, dim, tmp_dim;
1000 int total_dim = 0;
1001
1002 /* This signals a class array for which we need the size of the
1003 dynamic type. Generate an eltype and then the class expression. */
1004 if (eltype == NULL_TREE && initial)
1005 {
1006 if (POINTER_TYPE_P (TREE_TYPE (initial)))
1007 class_expr = build_fold_indirect_ref_loc (input_location, initial);
1008 eltype = TREE_TYPE (class_expr);
1009 eltype = gfc_get_element_type (eltype);
1010 /* Obtain the structure (class) expression. */
1011 class_expr = TREE_OPERAND (class_expr, 0);
1012 gcc_assert (class_expr);
1013 }
1014
1015 memset (from, 0, sizeof (from));
1016 memset (to, 0, sizeof (to));
1017
1018 info = &ss->info->data.array;
1019
1020 gcc_assert (ss->dimen > 0);
1021 gcc_assert (ss->loop->dimen == ss->dimen);
1022
1023 if (gfc_option.warn_array_temp && where)
1024 gfc_warning ("Creating array temporary at %L", where);
1025
1026 /* Set the lower bound to zero. */
1027 for (s = ss; s; s = s->parent)
1028 {
1029 loop = s->loop;
1030
1031 total_dim += loop->dimen;
1032 for (n = 0; n < loop->dimen; n++)
1033 {
1034 dim = s->dim[n];
1035
1036 /* Callee allocated arrays may not have a known bound yet. */
1037 if (loop->to[n])
1038 loop->to[n] = gfc_evaluate_now (
1039 fold_build2_loc (input_location, MINUS_EXPR,
1040 gfc_array_index_type,
1041 loop->to[n], loop->from[n]),
1042 pre);
1043 loop->from[n] = gfc_index_zero_node;
1044
1045 /* We have just changed the loop bounds; we must clear the
1046 corresponding specloop, so that delta calculation is not skipped
1047 later in gfc_set_delta. */
1048 loop->specloop[n] = NULL;
1049
1050 /* We are constructing the temporary's descriptor based on the loop
1051 dimensions. As the dimensions may be accessed in arbitrary order
1052 (think of transpose) the size taken from the n'th loop may not map
1053 to the n'th dimension of the array. We need to reconstruct loop
1054 infos in the right order before using them to set the descriptor
1055 bounds. */
1056 tmp_dim = get_scalarizer_dim_for_array_dim (ss, dim);
1057 from[tmp_dim] = loop->from[n];
1058 to[tmp_dim] = loop->to[n];
1059
1060 info->delta[dim] = gfc_index_zero_node;
1061 info->start[dim] = gfc_index_zero_node;
1062 info->end[dim] = gfc_index_zero_node;
1063 info->stride[dim] = gfc_index_one_node;
1064 }
1065 }
1066
1067 /* Initialize the descriptor. */
1068 type =
1069 gfc_get_array_type_bounds (eltype, total_dim, 0, from, to, 1,
1070 GFC_ARRAY_UNKNOWN, true);
1071 desc = gfc_create_var (type, "atmp");
1072 GFC_DECL_PACKED_ARRAY (desc) = 1;
1073
1074 info->descriptor = desc;
1075 size = gfc_index_one_node;
1076
1077 /* Fill in the array dtype. */
1078 tmp = gfc_conv_descriptor_dtype (desc);
1079 gfc_add_modify (pre, tmp, gfc_get_dtype (TREE_TYPE (desc)));
1080
1081 /*
1082 Fill in the bounds and stride. This is a packed array, so:
1083
1084 size = 1;
1085 for (n = 0; n < rank; n++)
1086 {
1087 stride[n] = size
1088 delta = ubound[n] + 1 - lbound[n];
1089 size = size * delta;
1090 }
1091 size = size * sizeof(element);
1092 */
1093
1094 or_expr = NULL_TREE;
1095
1096 /* If there is at least one null loop->to[n], it is a callee allocated
1097 array. */
1098 for (n = 0; n < total_dim; n++)
1099 if (to[n] == NULL_TREE)
1100 {
1101 size = NULL_TREE;
1102 break;
1103 }
1104
1105 if (size == NULL_TREE)
1106 for (s = ss; s; s = s->parent)
1107 for (n = 0; n < s->loop->dimen; n++)
1108 {
1109 dim = get_scalarizer_dim_for_array_dim (ss, s->dim[n]);
1110
1111 /* For a callee allocated array express the loop bounds in terms
1112 of the descriptor fields. */
1113 tmp = fold_build2_loc (input_location,
1114 MINUS_EXPR, gfc_array_index_type,
1115 gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]),
1116 gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]));
1117 s->loop->to[n] = tmp;
1118 }
1119 else
1120 {
1121 for (n = 0; n < total_dim; n++)
1122 {
1123 /* Store the stride and bound components in the descriptor. */
1124 gfc_conv_descriptor_stride_set (pre, desc, gfc_rank_cst[n], size);
1125
1126 gfc_conv_descriptor_lbound_set (pre, desc, gfc_rank_cst[n],
1127 gfc_index_zero_node);
1128
1129 gfc_conv_descriptor_ubound_set (pre, desc, gfc_rank_cst[n], to[n]);
1130
1131 tmp = fold_build2_loc (input_location, PLUS_EXPR,
1132 gfc_array_index_type,
1133 to[n], gfc_index_one_node);
1134
1135 /* Check whether the size for this dimension is negative. */
1136 cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
1137 tmp, gfc_index_zero_node);
1138 cond = gfc_evaluate_now (cond, pre);
1139
1140 if (n == 0)
1141 or_expr = cond;
1142 else
1143 or_expr = fold_build2_loc (input_location, TRUTH_OR_EXPR,
1144 boolean_type_node, or_expr, cond);
1145
1146 size = fold_build2_loc (input_location, MULT_EXPR,
1147 gfc_array_index_type, size, tmp);
1148 size = gfc_evaluate_now (size, pre);
1149 }
1150 }
1151
1152 /* Get the size of the array. */
1153 if (size && !callee_alloc)
1154 {
1155 tree elemsize;
1156 /* If or_expr is true, then the extent in at least one
1157 dimension is zero and the size is set to zero. */
1158 size = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type,
1159 or_expr, gfc_index_zero_node, size);
1160
1161 nelem = size;
1162 if (class_expr == NULL_TREE)
1163 elemsize = fold_convert (gfc_array_index_type,
1164 TYPE_SIZE_UNIT (gfc_get_element_type (type)));
1165 else
1166 elemsize = gfc_vtable_size_get (class_expr);
1167
1168 size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
1169 size, elemsize);
1170 }
1171 else
1172 {
1173 nelem = size;
1174 size = NULL_TREE;
1175 }
1176
1177 gfc_trans_allocate_array_storage (pre, post, info, size, nelem, initial,
1178 dynamic, dealloc);
1179
1180 while (ss->parent)
1181 ss = ss->parent;
1182
1183 if (ss->dimen > ss->loop->temp_dim)
1184 ss->loop->temp_dim = ss->dimen;
1185
1186 return size;
1187 }
1188
1189
1190 /* Return the number of iterations in a loop that starts at START,
1191 ends at END, and has step STEP. */
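/* In other words MAX (FLOOR_DIV (END - START, STEP) + 1, 0); e.g. with the
   (hypothetical) constant bounds START = 1, END = 10, STEP = 3 this gives
   FLOOR_DIV (9, 3) + 1 = 4 iterations, and with START = 5, END = 1,
   STEP = 1 it gives MAX (-4 + 1, 0) = 0 iterations.  */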
1192
1193 static tree
1194 gfc_get_iteration_count (tree start, tree end, tree step)
1195 {
1196 tree tmp;
1197 tree type;
1198
1199 type = TREE_TYPE (step);
1200 tmp = fold_build2_loc (input_location, MINUS_EXPR, type, end, start);
1201 tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR, type, tmp, step);
1202 tmp = fold_build2_loc (input_location, PLUS_EXPR, type, tmp,
1203 build_int_cst (type, 1));
1204 tmp = fold_build2_loc (input_location, MAX_EXPR, type, tmp,
1205 build_int_cst (type, 0));
1206 return fold_convert (gfc_array_index_type, tmp);
1207 }
1208
1209
1210 /* Extend the data in array DESC by EXTRA elements. */
1211
1212 static void
1213 gfc_grow_array (stmtblock_t * pblock, tree desc, tree extra)
1214 {
1215 tree arg0, arg1;
1216 tree tmp;
1217 tree size;
1218 tree ubound;
1219
1220 if (integer_zerop (extra))
1221 return;
1222
1223 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[0]);
1224
1225 /* Add EXTRA to the upper bound. */
1226 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
1227 ubound, extra);
1228 gfc_conv_descriptor_ubound_set (pblock, desc, gfc_rank_cst[0], tmp);
1229
1230 /* Get the value of the current data pointer. */
1231 arg0 = gfc_conv_descriptor_data_get (desc);
1232
1233 /* Calculate the new array size. */
1234 size = TYPE_SIZE_UNIT (gfc_get_element_type (TREE_TYPE (desc)));
1235 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
1236 ubound, gfc_index_one_node);
1237 arg1 = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
1238 fold_convert (size_type_node, tmp),
1239 fold_convert (size_type_node, size));
1240
1241 /* Call the realloc() function. */
1242 tmp = gfc_call_realloc (pblock, arg0, arg1);
1243 gfc_conv_descriptor_data_set (pblock, desc, tmp);
1244 }
1245
1246
1247 /* Return true if the bounds of iterator I can only be determined
1248 at run time. */
1249
1250 static inline bool
1251 gfc_iterator_has_dynamic_bounds (gfc_iterator * i)
1252 {
1253 return (i->start->expr_type != EXPR_CONSTANT
1254 || i->end->expr_type != EXPR_CONSTANT
1255 || i->step->expr_type != EXPR_CONSTANT);
1256 }
1257
1258
1259 /* Split the size of constructor element EXPR into the sum of two terms,
1260 one of which can be determined at compile time and one of which must
1261 be calculated at run time. Set *SIZE to the former and return true
1262 if the latter might be nonzero. */
1263
1264 static bool
1265 gfc_get_array_constructor_element_size (mpz_t * size, gfc_expr * expr)
1266 {
1267 if (expr->expr_type == EXPR_ARRAY)
1268 return gfc_get_array_constructor_size (size, expr->value.constructor);
1269 else if (expr->rank > 0)
1270 {
1271 /* Calculate everything at run time. */
1272 mpz_set_ui (*size, 0);
1273 return true;
1274 }
1275 else
1276 {
1277 /* A single element. */
1278 mpz_set_ui (*size, 1);
1279 return false;
1280 }
1281 }
1282
1283
1284 /* Like gfc_get_array_constructor_element_size, but applied to the whole
1285 of array constructor C. */
1286
1287 static bool
1288 gfc_get_array_constructor_size (mpz_t * size, gfc_constructor_base base)
1289 {
1290 gfc_constructor *c;
1291 gfc_iterator *i;
1292 mpz_t val;
1293 mpz_t len;
1294 bool dynamic;
1295
1296 mpz_set_ui (*size, 0);
1297 mpz_init (len);
1298 mpz_init (val);
1299
1300 dynamic = false;
1301 for (c = gfc_constructor_first (base); c; c = gfc_constructor_next (c))
1302 {
1303 i = c->iterator;
1304 if (i && gfc_iterator_has_dynamic_bounds (i))
1305 dynamic = true;
1306 else
1307 {
1308 dynamic |= gfc_get_array_constructor_element_size (&len, c->expr);
1309 if (i)
1310 {
1311 /* Multiply the static part of the element size by the
1312 number of iterations. */
1313 mpz_sub (val, i->end->value.integer, i->start->value.integer);
1314 mpz_fdiv_q (val, val, i->step->value.integer);
1315 mpz_add_ui (val, val, 1);
1316 if (mpz_sgn (val) > 0)
1317 mpz_mul (len, len, val);
1318 else
1319 mpz_set_ui (len, 0);
1320 }
1321 mpz_add (*size, *size, len);
1322 }
1323 }
1324 mpz_clear (len);
1325 mpz_clear (val);
1326 return dynamic;
1327 }
1328
1329
1330 /* Make sure offset is a variable. */
1331
1332 static void
1333 gfc_put_offset_into_var (stmtblock_t * pblock, tree * poffset,
1334 tree * offsetvar)
1335 {
1336 /* We should have already created the offset variable. We cannot
1337 create it here because we may be in an inner scope. */
1338 gcc_assert (*offsetvar != NULL_TREE);
1339 gfc_add_modify (pblock, *offsetvar, *poffset);
1340 *poffset = *offsetvar;
1341 TREE_USED (*offsetvar) = 1;
1342 }
1343
1344
1345 /* Variables needed for bounds-checking. */
1346 static bool first_len;
1347 static tree first_len_val;
1348 static bool typespec_chararray_ctor;
1349
1350 static void
1351 gfc_trans_array_ctor_element (stmtblock_t * pblock, tree desc,
1352 tree offset, gfc_se * se, gfc_expr * expr)
1353 {
1354 tree tmp;
1355
1356 gfc_conv_expr (se, expr);
1357
1358 /* Store the value. */
1359 tmp = build_fold_indirect_ref_loc (input_location,
1360 gfc_conv_descriptor_data_get (desc));
1361 tmp = gfc_build_array_ref (tmp, offset, NULL);
1362
1363 if (expr->ts.type == BT_CHARACTER)
1364 {
1365 int i = gfc_validate_kind (BT_CHARACTER, expr->ts.kind, false);
1366 tree esize;
1367
1368 esize = size_in_bytes (gfc_get_element_type (TREE_TYPE (desc)));
1369 esize = fold_convert (gfc_charlen_type_node, esize);
1370 esize = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
1371 gfc_charlen_type_node, esize,
1372 build_int_cst (gfc_charlen_type_node,
1373 gfc_character_kinds[i].bit_size / 8));
1374
1375 gfc_conv_string_parameter (se);
1376 if (POINTER_TYPE_P (TREE_TYPE (tmp)))
1377 {
1378 /* The temporary is an array of pointers. */
1379 se->expr = fold_convert (TREE_TYPE (tmp), se->expr);
1380 gfc_add_modify (&se->pre, tmp, se->expr);
1381 }
1382 else
1383 {
1384 /* The temporary is an array of string values. */
1385 tmp = gfc_build_addr_expr (gfc_get_pchar_type (expr->ts.kind), tmp);
1386 /* We know the temporary and the value will be the same length,
1387 so can use memcpy. */
1388 gfc_trans_string_copy (&se->pre, esize, tmp, expr->ts.kind,
1389 se->string_length, se->expr, expr->ts.kind);
1390 }
1391 if ((gfc_option.rtcheck & GFC_RTCHECK_BOUNDS) && !typespec_chararray_ctor)
1392 {
1393 if (first_len)
1394 {
1395 gfc_add_modify (&se->pre, first_len_val,
1396 se->string_length);
1397 first_len = false;
1398 }
1399 else
1400 {
1401 /* Verify that all constructor elements are of the same
1402 length. */
1403 tree cond = fold_build2_loc (input_location, NE_EXPR,
1404 boolean_type_node, first_len_val,
1405 se->string_length);
1406 gfc_trans_runtime_check
1407 (true, false, cond, &se->pre, &expr->where,
1408 "Different CHARACTER lengths (%ld/%ld) in array constructor",
1409 fold_convert (long_integer_type_node, first_len_val),
1410 fold_convert (long_integer_type_node, se->string_length));
1411 }
1412 }
1413 }
1414 else
1415 {
1416 /* TODO: Should the frontend already have done this conversion? */
1417 se->expr = fold_convert (TREE_TYPE (tmp), se->expr);
1418 gfc_add_modify (&se->pre, tmp, se->expr);
1419 }
1420
1421 gfc_add_block_to_block (pblock, &se->pre);
1422 gfc_add_block_to_block (pblock, &se->post);
1423 }
1424
1425
1426 /* Add the contents of an array to the constructor. DYNAMIC is as for
1427 gfc_trans_array_constructor_value. */
1428
1429 static void
1430 gfc_trans_array_constructor_subarray (stmtblock_t * pblock,
1431 tree type ATTRIBUTE_UNUSED,
1432 tree desc, gfc_expr * expr,
1433 tree * poffset, tree * offsetvar,
1434 bool dynamic)
1435 {
1436 gfc_se se;
1437 gfc_ss *ss;
1438 gfc_loopinfo loop;
1439 stmtblock_t body;
1440 tree tmp;
1441 tree size;
1442 int n;
1443
1444 /* We need this to be a variable so we can increment it. */
1445 gfc_put_offset_into_var (pblock, poffset, offsetvar);
1446
1447 gfc_init_se (&se, NULL);
1448
1449 /* Walk the array expression. */
1450 ss = gfc_walk_expr (expr);
1451 gcc_assert (ss != gfc_ss_terminator);
1452
1453 /* Initialize the scalarizer. */
1454 gfc_init_loopinfo (&loop);
1455 gfc_add_ss_to_loop (&loop, ss);
1456
1457 /* Initialize the loop. */
1458 gfc_conv_ss_startstride (&loop);
1459 gfc_conv_loop_setup (&loop, &expr->where);
1460
1461 /* Make sure the constructed array has room for the new data. */
1462 if (dynamic)
1463 {
1464 /* Set SIZE to the total number of elements in the subarray. */
1465 size = gfc_index_one_node;
1466 for (n = 0; n < loop.dimen; n++)
1467 {
1468 tmp = gfc_get_iteration_count (loop.from[n], loop.to[n],
1469 gfc_index_one_node);
1470 size = fold_build2_loc (input_location, MULT_EXPR,
1471 gfc_array_index_type, size, tmp);
1472 }
1473
1474 /* Grow the constructed array by SIZE elements. */
1475 gfc_grow_array (&loop.pre, desc, size);
1476 }
1477
1478 /* Make the loop body. */
1479 gfc_mark_ss_chain_used (ss, 1);
1480 gfc_start_scalarized_body (&loop, &body);
1481 gfc_copy_loopinfo_to_se (&se, &loop);
1482 se.ss = ss;
1483
1484 gfc_trans_array_ctor_element (&body, desc, *poffset, &se, expr);
1485 gcc_assert (se.ss == gfc_ss_terminator);
1486
1487 /* Increment the offset. */
1488 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
1489 *poffset, gfc_index_one_node);
1490 gfc_add_modify (&body, *poffset, tmp);
1491
1492 /* Finish the loop. */
1493 gfc_trans_scalarizing_loops (&loop, &body);
1494 gfc_add_block_to_block (&loop.pre, &loop.post);
1495 tmp = gfc_finish_block (&loop.pre);
1496 gfc_add_expr_to_block (pblock, tmp);
1497
1498 gfc_cleanup_loop (&loop);
1499 }
1500
1501
1502 /* Assign the values to the elements of an array constructor. DYNAMIC
1503 is true if descriptor DESC only contains enough data for the static
1504 size calculated by gfc_get_array_constructor_size. When true, memory
1505 for the dynamic parts must be allocated using realloc. */
1506
1507 static void
1508 gfc_trans_array_constructor_value (stmtblock_t * pblock, tree type,
1509 tree desc, gfc_constructor_base base,
1510 tree * poffset, tree * offsetvar,
1511 bool dynamic)
1512 {
1513 tree tmp;
1514 stmtblock_t body;
1515 gfc_se se;
1516 mpz_t size;
1517 gfc_constructor *c;
1518
1519 tree shadow_loopvar = NULL_TREE;
1520 gfc_saved_var saved_loopvar;
1521
1522 mpz_init (size);
1523 for (c = gfc_constructor_first (base); c; c = gfc_constructor_next (c))
1524 {
1525 /* If this is an iterator or an array, the offset must be a variable. */
1526 if ((c->iterator || c->expr->rank > 0) && INTEGER_CST_P (*poffset))
1527 gfc_put_offset_into_var (pblock, poffset, offsetvar);
1528
1529 /* Shadowing the iterator avoids changing its value and saves us from
1530 keeping track of it. Further, it makes sure that there's always a
1531 backend-decl for the symbol, even if there wasn't one before,
1532 e.g. in the case of an iterator that appears in a specification
1533 expression in an interface mapping. */
1534 if (c->iterator)
1535 {
1536 gfc_symbol *sym = c->iterator->var->symtree->n.sym;
1537 tree type = gfc_typenode_for_spec (&sym->ts);
1538
1539 shadow_loopvar = gfc_create_var (type, "shadow_loopvar");
1540 gfc_shadow_sym (sym, shadow_loopvar, &saved_loopvar);
1541 }
1542
1543 gfc_start_block (&body);
1544
1545 if (c->expr->expr_type == EXPR_ARRAY)
1546 {
1547 /* Array constructors can be nested. */
1548 gfc_trans_array_constructor_value (&body, type, desc,
1549 c->expr->value.constructor,
1550 poffset, offsetvar, dynamic);
1551 }
1552 else if (c->expr->rank > 0)
1553 {
1554 gfc_trans_array_constructor_subarray (&body, type, desc, c->expr,
1555 poffset, offsetvar, dynamic);
1556 }
1557 else
1558 {
1559 /* This code really upsets the gimplifier so don't bother for now. */
1560 gfc_constructor *p;
1561 HOST_WIDE_INT n;
1562 HOST_WIDE_INT size;
1563
1564 p = c;
1565 n = 0;
1566 while (p && !(p->iterator || p->expr->expr_type != EXPR_CONSTANT))
1567 {
1568 p = gfc_constructor_next (p);
1569 n++;
1570 }
1571 if (n < 4)
1572 {
1573 /* Scalar values. */
1574 gfc_init_se (&se, NULL);
1575 gfc_trans_array_ctor_element (&body, desc, *poffset,
1576 &se, c->expr);
1577
1578 *poffset = fold_build2_loc (input_location, PLUS_EXPR,
1579 gfc_array_index_type,
1580 *poffset, gfc_index_one_node);
1581 }
1582 else
1583 {
1584 /* Collect multiple scalar constants into a constructor. */
1585 VEC(constructor_elt,gc) *v = NULL;
1586 tree init;
1587 tree bound;
1588 tree tmptype;
1589 HOST_WIDE_INT idx = 0;
1590
1591 p = c;
1592 /* Count the number of consecutive scalar constants. */
1593 while (p && !(p->iterator
1594 || p->expr->expr_type != EXPR_CONSTANT))
1595 {
1596 gfc_init_se (&se, NULL);
1597 gfc_conv_constant (&se, p->expr);
1598
1599 if (c->expr->ts.type != BT_CHARACTER)
1600 se.expr = fold_convert (type, se.expr);
1601 /* For constant character array constructors we build
1602 an array of pointers. */
1603 else if (POINTER_TYPE_P (type))
1604 se.expr = gfc_build_addr_expr
1605 (gfc_get_pchar_type (p->expr->ts.kind),
1606 se.expr);
1607
1608 CONSTRUCTOR_APPEND_ELT (v,
1609 build_int_cst (gfc_array_index_type,
1610 idx++),
1611 se.expr);
1612 c = p;
1613 p = gfc_constructor_next (p);
1614 }
1615
1616 bound = size_int (n - 1);
1617 /* Create an array type to hold them. */
1618 tmptype = build_range_type (gfc_array_index_type,
1619 gfc_index_zero_node, bound);
1620 tmptype = build_array_type (type, tmptype);
1621
1622 init = build_constructor (tmptype, v);
1623 TREE_CONSTANT (init) = 1;
1624 TREE_STATIC (init) = 1;
1625 /* Create a static variable to hold the data. */
1626 tmp = gfc_create_var (tmptype, "data");
1627 TREE_STATIC (tmp) = 1;
1628 TREE_CONSTANT (tmp) = 1;
1629 TREE_READONLY (tmp) = 1;
1630 DECL_INITIAL (tmp) = init;
1631 init = tmp;
1632
1633 /* Use BUILTIN_MEMCPY to assign the values. */
1634 tmp = gfc_conv_descriptor_data_get (desc);
1635 tmp = build_fold_indirect_ref_loc (input_location,
1636 tmp);
1637 tmp = gfc_build_array_ref (tmp, *poffset, NULL);
1638 tmp = gfc_build_addr_expr (NULL_TREE, tmp);
1639 init = gfc_build_addr_expr (NULL_TREE, init);
1640
1641 size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (type));
1642 bound = build_int_cst (size_type_node, n * size);
1643 tmp = build_call_expr_loc (input_location,
1644 builtin_decl_explicit (BUILT_IN_MEMCPY),
1645 3, tmp, init, bound);
1646 gfc_add_expr_to_block (&body, tmp);
1647
1648 *poffset = fold_build2_loc (input_location, PLUS_EXPR,
1649 gfc_array_index_type, *poffset,
1650 build_int_cst (gfc_array_index_type, n));
1651 }
1652 if (!INTEGER_CST_P (*poffset))
1653 {
1654 gfc_add_modify (&body, *offsetvar, *poffset);
1655 *poffset = *offsetvar;
1656 }
1657 }
1658
1659 /* The frontend should already have done any expansions
1660 at compile-time. */
1661 if (!c->iterator)
1662 {
1663 /* Pass the code as is. */
1664 tmp = gfc_finish_block (&body);
1665 gfc_add_expr_to_block (pblock, tmp);
1666 }
1667 else
1668 {
1669 /* Build the implied do-loop. */
1670 stmtblock_t implied_do_block;
1671 tree cond;
1672 tree end;
1673 tree step;
1674 tree exit_label;
1675 tree loopbody;
1676 tree tmp2;
1677
1678 loopbody = gfc_finish_block (&body);
1679
1680 /* Create a new block that holds the implied-do loop. A temporary
1681 loop-variable is used. */
1682 gfc_start_block (&implied_do_block);
1683
1684 /* Initialize the loop. */
1685 gfc_init_se (&se, NULL);
1686 gfc_conv_expr_val (&se, c->iterator->start);
1687 gfc_add_block_to_block (&implied_do_block, &se.pre);
1688 gfc_add_modify (&implied_do_block, shadow_loopvar, se.expr);
1689
1690 gfc_init_se (&se, NULL);
1691 gfc_conv_expr_val (&se, c->iterator->end);
1692 gfc_add_block_to_block (&implied_do_block, &se.pre);
1693 end = gfc_evaluate_now (se.expr, &implied_do_block);
1694
1695 gfc_init_se (&se, NULL);
1696 gfc_conv_expr_val (&se, c->iterator->step);
1697 gfc_add_block_to_block (&implied_do_block, &se.pre);
1698 step = gfc_evaluate_now (se.expr, &implied_do_block);
1699
1700 /* If this array expands dynamically, and the number of iterations
1701 is not constant, we won't have allocated space for the static
1702 part of C->EXPR's size. Do that now. */
1703 if (dynamic && gfc_iterator_has_dynamic_bounds (c->iterator))
1704 {
1705 /* Get the number of iterations. */
1706 tmp = gfc_get_iteration_count (shadow_loopvar, end, step);
1707
1708 /* Get the static part of C->EXPR's size. */
1709 gfc_get_array_constructor_element_size (&size, c->expr);
1710 tmp2 = gfc_conv_mpz_to_tree (size, gfc_index_integer_kind);
1711
1712 /* Grow the array by TMP * TMP2 elements. */
1713 tmp = fold_build2_loc (input_location, MULT_EXPR,
1714 gfc_array_index_type, tmp, tmp2);
1715 gfc_grow_array (&implied_do_block, desc, tmp);
1716 }
1717
1718 /* Generate the loop body. */
1719 exit_label = gfc_build_label_decl (NULL_TREE);
1720 gfc_start_block (&body);
1721
1722 /* Generate the exit condition. Depending on the sign of
1723 the step variable we have to generate the correct
1724 comparison. */
1725 tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
1726 step, build_int_cst (TREE_TYPE (step), 0));
1727 cond = fold_build3_loc (input_location, COND_EXPR,
1728 boolean_type_node, tmp,
1729 fold_build2_loc (input_location, GT_EXPR,
1730 boolean_type_node, shadow_loopvar, end),
1731 fold_build2_loc (input_location, LT_EXPR,
1732 boolean_type_node, shadow_loopvar, end));
1733 tmp = build1_v (GOTO_EXPR, exit_label);
1734 TREE_USED (exit_label) = 1;
1735 tmp = build3_v (COND_EXPR, cond, tmp,
1736 build_empty_stmt (input_location));
1737 gfc_add_expr_to_block (&body, tmp);
1738
1739 /* The main loop body. */
1740 gfc_add_expr_to_block (&body, loopbody);
1741
1742 /* Increase loop variable by step. */
1743 tmp = fold_build2_loc (input_location, PLUS_EXPR,
1744 TREE_TYPE (shadow_loopvar), shadow_loopvar,
1745 step);
1746 gfc_add_modify (&body, shadow_loopvar, tmp);
1747
1748 /* Finish the loop. */
1749 tmp = gfc_finish_block (&body);
1750 tmp = build1_v (LOOP_EXPR, tmp);
1751 gfc_add_expr_to_block (&implied_do_block, tmp);
1752
1753 /* Add the exit label. */
1754 tmp = build1_v (LABEL_EXPR, exit_label);
1755 gfc_add_expr_to_block (&implied_do_block, tmp);
1756
1757 /* Finish the implied-do loop. */
1758 tmp = gfc_finish_block (&implied_do_block);
1759 gfc_add_expr_to_block (pblock, tmp);
1760
1761 gfc_restore_sym (c->iterator->var->symtree->n.sym, &saved_loopvar);
1762 }
1763 }
1764 mpz_clear (size);
1765 }
1766
1767
1768 /* A catch-all to obtain the string length for anything that is not a
1769 substring of non-constant length, a constant, array or variable. */
1770
1771 static void
1772 get_array_ctor_all_strlen (stmtblock_t *block, gfc_expr *e, tree *len)
1773 {
1774 gfc_se se;
1775 gfc_ss *ss;
1776
1777 /* Don't bother if we already know the length is a constant. */
1778 if (*len && INTEGER_CST_P (*len))
1779 return;
1780
1781 if (!e->ref && e->ts.u.cl && e->ts.u.cl->length
1782 && e->ts.u.cl->length->expr_type == EXPR_CONSTANT)
1783 {
1784 /* This is easy. */
1785 gfc_conv_const_charlen (e->ts.u.cl);
1786 *len = e->ts.u.cl->backend_decl;
1787 }
1788 else
1789 {
1790 /* Otherwise, be brutal even if inefficient. */
1791 ss = gfc_walk_expr (e);
1792 gfc_init_se (&se, NULL);
1793
1794 /* No function call, in case of side effects. */
1795 se.no_function_call = 1;
1796 if (ss == gfc_ss_terminator)
1797 gfc_conv_expr (&se, e);
1798 else
1799 gfc_conv_expr_descriptor (&se, e, ss);
1800
1801 /* Fix the value. */
1802 *len = gfc_evaluate_now (se.string_length, &se.pre);
1803
1804 gfc_add_block_to_block (block, &se.pre);
1805 gfc_add_block_to_block (block, &se.post);
1806
1807 e->ts.u.cl->backend_decl = *len;
1808 }
1809 }
1810
1811
1812 /* Figure out the string length of a variable reference expression.
1813 Used by get_array_ctor_strlen. */
1814
1815 static void
1816 get_array_ctor_var_strlen (stmtblock_t *block, gfc_expr * expr, tree * len)
1817 {
1818 gfc_ref *ref;
1819 gfc_typespec *ts;
1820 mpz_t char_len;
1821
1822 /* Don't bother if we already know the length is a constant. */
1823 if (*len && INTEGER_CST_P (*len))
1824 return;
1825
1826 ts = &expr->symtree->n.sym->ts;
1827 for (ref = expr->ref; ref; ref = ref->next)
1828 {
1829 switch (ref->type)
1830 {
1831 case REF_ARRAY:
1832 /* Array references don't change the string length. */
1833 break;
1834
1835 case REF_COMPONENT:
1836 /* Use the length of the component. */
1837 ts = &ref->u.c.component->ts;
1838 break;
1839
1840 case REF_SUBSTRING:
1841 if (ref->u.ss.start->expr_type != EXPR_CONSTANT
1842 || ref->u.ss.end->expr_type != EXPR_CONSTANT)
1843 {
1844 /* Note that this might evaluate expr. */
1845 get_array_ctor_all_strlen (block, expr, len);
1846 return;
1847 }
1848 mpz_init_set_ui (char_len, 1);
1849 mpz_add (char_len, char_len, ref->u.ss.end->value.integer);
1850 mpz_sub (char_len, char_len, ref->u.ss.start->value.integer);
1851 *len = gfc_conv_mpz_to_tree (char_len, gfc_default_integer_kind);
1852 *len = convert (gfc_charlen_type_node, *len);
1853 mpz_clear (char_len);
1854 return;
1855
1856 default:
1857 gcc_unreachable ();
1858 }
1859 }
1860
1861 *len = ts->u.cl->backend_decl;
1862 }
1863
1864
1865 /* Figure out the string length of a character array constructor.
1866 If len is NULL, don't calculate the length; this happens for recursive calls
1867 when a sub-array-constructor is an element but not at the first position,
1868 i.e. when we are not interested in the length.
1869 Returns TRUE if all elements are character constants. */
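/* A small example of the intended behaviour: for the constructor
   (/ 'abc', str /)  the length is taken from the first element, 'abc',
   while the variable element makes the returned is_const false.  */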
1870
1871 bool
1872 get_array_ctor_strlen (stmtblock_t *block, gfc_constructor_base base, tree * len)
1873 {
1874 gfc_constructor *c;
1875 bool is_const;
1876
1877 is_const = TRUE;
1878
1879 if (gfc_constructor_first (base) == NULL)
1880 {
1881 if (len)
1882 *len = build_int_cstu (gfc_charlen_type_node, 0);
1883 return is_const;
1884 }
1885
1886 /* Loop over all constructor elements to find out is_const, but in len we
1887 want to store the length of the first, not the last, element. We can
1888 of course exit the loop as soon as is_const is found to be false. */
1889 for (c = gfc_constructor_first (base);
1890 c && is_const; c = gfc_constructor_next (c))
1891 {
1892 switch (c->expr->expr_type)
1893 {
1894 case EXPR_CONSTANT:
1895 if (len && !(*len && INTEGER_CST_P (*len)))
1896 *len = build_int_cstu (gfc_charlen_type_node,
1897 c->expr->value.character.length);
1898 break;
1899
1900 case EXPR_ARRAY:
1901 if (!get_array_ctor_strlen (block, c->expr->value.constructor, len))
1902 is_const = false;
1903 break;
1904
1905 case EXPR_VARIABLE:
1906 is_const = false;
1907 if (len)
1908 get_array_ctor_var_strlen (block, c->expr, len);
1909 break;
1910
1911 default:
1912 is_const = false;
1913 if (len)
1914 get_array_ctor_all_strlen (block, c->expr, len);
1915 break;
1916 }
1917
1918 /* After the first iteration, we don't want the length modified. */
1919 len = NULL;
1920 }
1921
1922 return is_const;
1923 }
1924
1925 /* Check whether the array constructor BASE consists entirely of constant
1926 elements, and if so return the number of those elements; otherwise
1927 return zero. Note that an empty or NULL array constructor returns zero. */
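/* For instance, (/ 1, 2, 3 /) yields 3, whereas (/ 1, i /) and
   (/ (j, j=1,3) /) yield 0 because of the non-constant element and the
   iterator respectively.  */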
1928
1929 unsigned HOST_WIDE_INT
1930 gfc_constant_array_constructor_p (gfc_constructor_base base)
1931 {
1932 unsigned HOST_WIDE_INT nelem = 0;
1933
1934 gfc_constructor *c = gfc_constructor_first (base);
1935 while (c)
1936 {
1937 if (c->iterator
1938 || c->expr->rank > 0
1939 || c->expr->expr_type != EXPR_CONSTANT)
1940 return 0;
1941 c = gfc_constructor_next (c);
1942 nelem++;
1943 }
1944 return nelem;
1945 }
1946
1947
1948 /* Given EXPR, the constant array constructor specified by an EXPR_ARRAY,
1949 and the tree type of its elements, TYPE, return a static constant
1950 variable that is compile-time initialized. */
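/* Roughly speaking, for an integer constructor such as (/ 1, 2, 3 /) this
   produces the equivalent of

       static const int A[3] = {1, 2, 3};

   that is, a TREE_STATIC, TREE_READONLY variable whose DECL_INITIAL is the
   converted constructor, so no run-time initialization is needed.  */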
1951
1952 tree
1953 gfc_build_constant_array_constructor (gfc_expr * expr, tree type)
1954 {
1955 tree tmptype, init, tmp;
1956 HOST_WIDE_INT nelem;
1957 gfc_constructor *c;
1958 gfc_array_spec as;
1959 gfc_se se;
1960 int i;
1961 VEC(constructor_elt,gc) *v = NULL;
1962
1963 /* First traverse the constructor list, converting the constants
1964 to trees to build an initializer. */
1965 nelem = 0;
1966 c = gfc_constructor_first (expr->value.constructor);
1967 while (c)
1968 {
1969 gfc_init_se (&se, NULL);
1970 gfc_conv_constant (&se, c->expr);
1971 if (c->expr->ts.type != BT_CHARACTER)
1972 se.expr = fold_convert (type, se.expr);
1973 else if (POINTER_TYPE_P (type))
1974 se.expr = gfc_build_addr_expr (gfc_get_pchar_type (c->expr->ts.kind),
1975 se.expr);
1976 CONSTRUCTOR_APPEND_ELT (v, build_int_cst (gfc_array_index_type, nelem),
1977 se.expr);
1978 c = gfc_constructor_next (c);
1979 nelem++;
1980 }
1981
1982 /* Next determine the tree type for the array. We use the gfortran
1983 front-end's gfc_get_nodesc_array_type in order to create a suitable
1984 GFC_ARRAY_TYPE_P that may be used by the scalarizer. */
1985
1986 memset (&as, 0, sizeof (gfc_array_spec));
1987
1988 as.rank = expr->rank;
1989 as.type = AS_EXPLICIT;
1990 if (!expr->shape)
1991 {
1992 as.lower[0] = gfc_get_int_expr (gfc_default_integer_kind, NULL, 0);
1993 as.upper[0] = gfc_get_int_expr (gfc_default_integer_kind,
1994 NULL, nelem - 1);
1995 }
1996 else
1997 for (i = 0; i < expr->rank; i++)
1998 {
1999 int tmp = (int) mpz_get_si (expr->shape[i]);
2000 as.lower[i] = gfc_get_int_expr (gfc_default_integer_kind, NULL, 0);
2001 as.upper[i] = gfc_get_int_expr (gfc_default_integer_kind,
2002 NULL, tmp - 1);
2003 }
2004
2005 tmptype = gfc_get_nodesc_array_type (type, &as, PACKED_STATIC, true);
2006
2007 /* as is not needed anymore. */
2008 for (i = 0; i < as.rank + as.corank; i++)
2009 {
2010 gfc_free_expr (as.lower[i]);
2011 gfc_free_expr (as.upper[i]);
2012 }
2013
2014 init = build_constructor (tmptype, v);
2015
2016 TREE_CONSTANT (init) = 1;
2017 TREE_STATIC (init) = 1;
2018
2019 tmp = gfc_create_var (tmptype, "A");
2020 TREE_STATIC (tmp) = 1;
2021 TREE_CONSTANT (tmp) = 1;
2022 TREE_READONLY (tmp) = 1;
2023 DECL_INITIAL (tmp) = init;
2024
2025 return tmp;
2026 }
2027
2028
2029 /* Translate a constant EXPR_ARRAY array constructor for the scalarizer.
2030 This mostly initializes the scalarizer state info structure with the
2031 appropriate values to directly use the array created by the function
2032 gfc_build_constant_array_constructor. */
2033
2034 static void
2035 trans_constant_array_constructor (gfc_ss * ss, tree type)
2036 {
2037 gfc_array_info *info;
2038 tree tmp;
2039 int i;
2040
2041 tmp = gfc_build_constant_array_constructor (ss->info->expr, type);
2042
2043 info = &ss->info->data.array;
2044
2045 info->descriptor = tmp;
2046 info->data = gfc_build_addr_expr (NULL_TREE, tmp);
2047 info->offset = gfc_index_zero_node;
2048
2049 for (i = 0; i < ss->dimen; i++)
2050 {
2051 info->delta[i] = gfc_index_zero_node;
2052 info->start[i] = gfc_index_zero_node;
2053 info->end[i] = gfc_index_zero_node;
2054 info->stride[i] = gfc_index_one_node;
2055 }
2056 }
2057
2058
2059 static int
2060 get_rank (gfc_loopinfo *loop)
2061 {
2062 int rank;
2063
2064 rank = 0;
2065 for (; loop; loop = loop->parent)
2066 rank += loop->dimen;
2067
2068 return rank;
2069 }
2070
2071
2072 /* Helper routine of gfc_trans_array_constructor to determine if the
2073 bounds of the loop specified by LOOP are constant and simple enough
2074 to use with trans_constant_array_constructor. Returns the
2075 iteration count of the loop if suitable, and NULL_TREE otherwise. */
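/* The count is the product of the extents; e.g. loop bounds 0..2 and 0..1
   give (2 + 1) * (1 + 1) = 6 iterations.  A nonzero lower bound is only
   accepted for one-dimensional loops, in which case the extent is
   to - from + 1.  */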
2076
2077 static tree
2078 constant_array_constructor_loop_size (gfc_loopinfo * l)
2079 {
2080 gfc_loopinfo *loop;
2081 tree size = gfc_index_one_node;
2082 tree tmp;
2083 int i, total_dim;
2084
2085 total_dim = get_rank (l);
2086
2087 for (loop = l; loop; loop = loop->parent)
2088 {
2089 for (i = 0; i < loop->dimen; i++)
2090 {
2091 /* If the bounds aren't constant, return NULL_TREE. */
2092 if (!INTEGER_CST_P (loop->from[i]) || !INTEGER_CST_P (loop->to[i]))
2093 return NULL_TREE;
2094 if (!integer_zerop (loop->from[i]))
2095 {
2096 /* Only allow nonzero "from" in one-dimensional arrays. */
2097 if (total_dim != 1)
2098 return NULL_TREE;
2099 tmp = fold_build2_loc (input_location, MINUS_EXPR,
2100 gfc_array_index_type,
2101 loop->to[i], loop->from[i]);
2102 }
2103 else
2104 tmp = loop->to[i];
2105 tmp = fold_build2_loc (input_location, PLUS_EXPR,
2106 gfc_array_index_type, tmp, gfc_index_one_node);
2107 size = fold_build2_loc (input_location, MULT_EXPR,
2108 gfc_array_index_type, size, tmp);
2109 }
2110 }
2111
2112 return size;
2113 }
2114
2115
2116 static tree *
2117 get_loop_upper_bound_for_array (gfc_ss *array, int array_dim)
2118 {
2119 gfc_ss *ss;
2120 int n;
2121
2122 gcc_assert (array->nested_ss == NULL);
2123
2124 for (ss = array; ss; ss = ss->parent)
2125 for (n = 0; n < ss->loop->dimen; n++)
2126 if (array_dim == get_array_ref_dim_for_loop_dim (ss, n))
2127 return &(ss->loop->to[n]);
2128
2129 gcc_unreachable ();
2130 }
2131
2132
2133 static gfc_loopinfo *
2134 outermost_loop (gfc_loopinfo * loop)
2135 {
2136 while (loop->parent != NULL)
2137 loop = loop->parent;
2138
2139 return loop;
2140 }
2141
2142
2143 /* Array constructors are handled by constructing a temporary, then using that
2144 within the scalarization loop. This is not optimal, but seems by far the
2145 simplest method. */
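/* As an illustration (the names are made up): for

       x = (/ 1.0, y, (z(i), i=1,n) /)

   the temporary's size has a static part (the two leading elements) and,
   because of the implied-do over the unknown N, a dynamic part; the dynamic
   case is detected below via gfc_get_array_constructor_size and may cause
   the temporary to be grown at run time.  */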
2146
2147 static void
2148 trans_array_constructor (gfc_ss * ss, locus * where)
2149 {
2150 gfc_constructor_base c;
2151 tree offset;
2152 tree offsetvar;
2153 tree desc;
2154 tree type;
2155 tree tmp;
2156 tree *loop_ubound0;
2157 bool dynamic;
2158 bool old_first_len, old_typespec_chararray_ctor;
2159 tree old_first_len_val;
2160 gfc_loopinfo *loop, *outer_loop;
2161 gfc_ss_info *ss_info;
2162 gfc_expr *expr;
2163 gfc_ss *s;
2164
2165 /* Save the old values for nested checking. */
2166 old_first_len = first_len;
2167 old_first_len_val = first_len_val;
2168 old_typespec_chararray_ctor = typespec_chararray_ctor;
2169
2170 loop = ss->loop;
2171 outer_loop = outermost_loop (loop);
2172 ss_info = ss->info;
2173 expr = ss_info->expr;
2174
2175 /* Do bounds-checking here and in gfc_trans_array_ctor_element only if no
2176 typespec was given for the array constructor. */
2177 typespec_chararray_ctor = (expr->ts.u.cl
2178 && expr->ts.u.cl->length_from_typespec);
2179
2180 if ((gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
2181 && expr->ts.type == BT_CHARACTER && !typespec_chararray_ctor)
2182 {
2183 first_len_val = gfc_create_var (gfc_charlen_type_node, "len");
2184 first_len = true;
2185 }
2186
2187 gcc_assert (ss->dimen == ss->loop->dimen);
2188
2189 c = expr->value.constructor;
2190 if (expr->ts.type == BT_CHARACTER)
2191 {
2192 bool const_string;
2193
2194 /* get_array_ctor_strlen walks the elements of the constructor; if a
2195 typespec was given, we already know the string length and want the one
2196 specified there. */
2197 if (typespec_chararray_ctor && expr->ts.u.cl->length
2198 && expr->ts.u.cl->length->expr_type != EXPR_CONSTANT)
2199 {
2200 gfc_se length_se;
2201
2202 const_string = false;
2203 gfc_init_se (&length_se, NULL);
2204 gfc_conv_expr_type (&length_se, expr->ts.u.cl->length,
2205 gfc_charlen_type_node);
2206 ss_info->string_length = length_se.expr;
2207 gfc_add_block_to_block (&outer_loop->pre, &length_se.pre);
2208 gfc_add_block_to_block (&outer_loop->post, &length_se.post);
2209 }
2210 else
2211 const_string = get_array_ctor_strlen (&outer_loop->pre, c,
2212 &ss_info->string_length);
2213
2214 /* Complex character array constructors should have been taken care of
2215 and not end up here. */
2216 gcc_assert (ss_info->string_length);
2217
2218 expr->ts.u.cl->backend_decl = ss_info->string_length;
2219
2220 type = gfc_get_character_type_len (expr->ts.kind, ss_info->string_length);
2221 if (const_string)
2222 type = build_pointer_type (type);
2223 }
2224 else
2225 type = gfc_typenode_for_spec (&expr->ts);
2226
2227 /* See if the constructor determines the loop bounds. */
2228 dynamic = false;
2229
2230 loop_ubound0 = get_loop_upper_bound_for_array (ss, 0);
2231
2232 if (expr->shape && get_rank (loop) > 1 && *loop_ubound0 == NULL_TREE)
2233 {
2234 /* We have a multidimensional parameter. */
2235 for (s = ss; s; s = s->parent)
2236 {
2237 int n;
2238 for (n = 0; n < s->loop->dimen; n++)
2239 {
2240 s->loop->from[n] = gfc_index_zero_node;
2241 s->loop->to[n] = gfc_conv_mpz_to_tree (expr->shape[s->dim[n]],
2242 gfc_index_integer_kind);
2243 s->loop->to[n] = fold_build2_loc (input_location, MINUS_EXPR,
2244 gfc_array_index_type,
2245 s->loop->to[n],
2246 gfc_index_one_node);
2247 }
2248 }
2249 }
2250
2251 if (*loop_ubound0 == NULL_TREE)
2252 {
2253 mpz_t size;
2254
2255 /* We should have a 1-dimensional, zero-based loop. */
2256 gcc_assert (loop->parent == NULL && loop->nested == NULL);
2257 gcc_assert (loop->dimen == 1);
2258 gcc_assert (integer_zerop (loop->from[0]));
2259
2260 /* Split the constructor size into a static part and a dynamic part.
2261 Allocate the static size up-front and record whether the dynamic
2262 size might be nonzero. */
2263 mpz_init (size);
2264 dynamic = gfc_get_array_constructor_size (&size, c);
2265 mpz_sub_ui (size, size, 1);
2266 loop->to[0] = gfc_conv_mpz_to_tree (size, gfc_index_integer_kind);
2267 mpz_clear (size);
2268 }
2269
2270 /* Special case constant array constructors. */
2271 if (!dynamic)
2272 {
2273 unsigned HOST_WIDE_INT nelem = gfc_constant_array_constructor_p (c);
2274 if (nelem > 0)
2275 {
2276 tree size = constant_array_constructor_loop_size (loop);
2277 if (size && compare_tree_int (size, nelem) == 0)
2278 {
2279 trans_constant_array_constructor (ss, type);
2280 goto finish;
2281 }
2282 }
2283 }
2284
2285 if (TREE_CODE (*loop_ubound0) == VAR_DECL)
2286 dynamic = true;
2287
2288 gfc_trans_create_temp_array (&outer_loop->pre, &outer_loop->post, ss, type,
2289 NULL_TREE, dynamic, true, false, where);
2290
2291 desc = ss_info->data.array.descriptor;
2292 offset = gfc_index_zero_node;
2293 offsetvar = gfc_create_var_np (gfc_array_index_type, "offset");
2294 TREE_NO_WARNING (offsetvar) = 1;
2295 TREE_USED (offsetvar) = 0;
2296 gfc_trans_array_constructor_value (&outer_loop->pre, type, desc, c,
2297 &offset, &offsetvar, dynamic);
2298
2299 /* If the array grows dynamically, the upper bound of the loop variable
2300 is determined by the array's final upper bound. */
2301 if (dynamic)
2302 {
2303 tmp = fold_build2_loc (input_location, MINUS_EXPR,
2304 gfc_array_index_type,
2305 offsetvar, gfc_index_one_node);
2306 tmp = gfc_evaluate_now (tmp, &outer_loop->pre);
2307 gfc_conv_descriptor_ubound_set (&loop->pre, desc, gfc_rank_cst[0], tmp);
2308 if (*loop_ubound0 && TREE_CODE (*loop_ubound0) == VAR_DECL)
2309 gfc_add_modify (&outer_loop->pre, *loop_ubound0, tmp);
2310 else
2311 *loop_ubound0 = tmp;
2312 }
2313
2314 if (TREE_USED (offsetvar))
2315 pushdecl (offsetvar);
2316 else
2317 gcc_assert (INTEGER_CST_P (offset));
2318
2319 #if 0
2320 /* Disable bound checking for now because it's probably broken. */
2321 if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
2322 {
2323 gcc_unreachable ();
2324 }
2325 #endif
2326
2327 finish:
2328 /* Restore old values of globals. */
2329 first_len = old_first_len;
2330 first_len_val = old_first_len_val;
2331 typespec_chararray_ctor = old_typespec_chararray_ctor;
2332 }
2333
2334
2335 /* INFO describes a GFC_SS_SECTION in loop LOOP, and this function is
2336 called after evaluating all of INFO's vector dimensions. Go through
2337 each such vector dimension and see if we can now fill in any missing
2338 loop bounds. */
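/* For example, for a vector subscript  a(v)  whose descriptor has bounds
   LB:UB, the corresponding zero-based scalarization loop runs from 0 to
   UB - LB, which is exactly what is computed below.  */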
2339
2340 static void
2341 set_vector_loop_bounds (gfc_ss * ss)
2342 {
2343 gfc_loopinfo *loop, *outer_loop;
2344 gfc_array_info *info;
2345 gfc_se se;
2346 tree tmp;
2347 tree desc;
2348 tree zero;
2349 int n;
2350 int dim;
2351
2352 outer_loop = outermost_loop (ss->loop);
2353
2354 info = &ss->info->data.array;
2355
2356 for (; ss; ss = ss->parent)
2357 {
2358 loop = ss->loop;
2359
2360 for (n = 0; n < loop->dimen; n++)
2361 {
2362 dim = ss->dim[n];
2363 if (info->ref->u.ar.dimen_type[dim] != DIMEN_VECTOR
2364 || loop->to[n] != NULL)
2365 continue;
2366
2367 /* Loop variable N indexes vector dimension DIM, and we don't
2368 yet know the upper bound of loop variable N. Set it to the
2369 difference between the vector's upper and lower bounds. */
2370 gcc_assert (loop->from[n] == gfc_index_zero_node);
2371 gcc_assert (info->subscript[dim]
2372 && info->subscript[dim]->info->type == GFC_SS_VECTOR);
2373
2374 gfc_init_se (&se, NULL);
2375 desc = info->subscript[dim]->info->data.array.descriptor;
2376 zero = gfc_rank_cst[0];
2377 tmp = fold_build2_loc (input_location, MINUS_EXPR,
2378 gfc_array_index_type,
2379 gfc_conv_descriptor_ubound_get (desc, zero),
2380 gfc_conv_descriptor_lbound_get (desc, zero));
2381 tmp = gfc_evaluate_now (tmp, &outer_loop->pre);
2382 loop->to[n] = tmp;
2383 }
2384 }
2385 }
2386
2387
2388 /* Add the pre and post chains for all the scalar expressions in a SS chain
2389 to loop. This is called after the loop parameters have been calculated,
2390 but before the actual scalarizing loops. */
2391
2392 static void
2393 gfc_add_loop_ss_code (gfc_loopinfo * loop, gfc_ss * ss, bool subscript,
2394 locus * where)
2395 {
2396 gfc_loopinfo *nested_loop, *outer_loop;
2397 gfc_se se;
2398 gfc_ss_info *ss_info;
2399 gfc_array_info *info;
2400 gfc_expr *expr;
2401 bool skip_nested = false;
2402 int n;
2403
2404 outer_loop = outermost_loop (loop);
2405
2406 /* TODO: This can generate bad code if there are ordering dependencies,
2407 e.g., a callee allocated function and an unknown size constructor. */
2408 gcc_assert (ss != NULL);
2409
2410 for (; ss != gfc_ss_terminator; ss = ss->loop_chain)
2411 {
2412 gcc_assert (ss);
2413
2414 /* Cross loop arrays are handled from within the most nested loop. */
2415 if (ss->nested_ss != NULL)
2416 continue;
2417
2418 ss_info = ss->info;
2419 expr = ss_info->expr;
2420 info = &ss_info->data.array;
2421
2422 switch (ss_info->type)
2423 {
2424 case GFC_SS_SCALAR:
2425 /* Scalar expression. Evaluate this now. This includes elemental
2426 dimension indices, but not array section bounds. */
2427 gfc_init_se (&se, NULL);
2428 gfc_conv_expr (&se, expr);
2429 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2430
2431 if (expr->ts.type != BT_CHARACTER)
2432 {
2433 /* Move the evaluation of scalar expressions outside the
2434 scalarization loop, except for WHERE assignments. */
2435 if (subscript)
2436 se.expr = convert (gfc_array_index_type, se.expr);
2437 if (!ss_info->where)
2438 se.expr = gfc_evaluate_now (se.expr, &outer_loop->pre);
2439 gfc_add_block_to_block (&outer_loop->pre, &se.post);
2440 }
2441 else
2442 gfc_add_block_to_block (&outer_loop->post, &se.post);
2443
2444 ss_info->data.scalar.value = se.expr;
2445 ss_info->string_length = se.string_length;
2446 break;
2447
2448 case GFC_SS_REFERENCE:
2449 /* Scalar argument to elemental procedure. */
2450 gfc_init_se (&se, NULL);
2451 if (ss_info->data.scalar.can_be_null_ref)
2452 {
2453 /* If the actual argument can be absent (in other words, it can
2454 be a NULL reference), don't try to evaluate it; pass instead
2455 the reference directly. */
2456 gfc_conv_expr_reference (&se, expr);
2457 }
2458 else
2459 {
2460 /* Otherwise, evaluate the argument outside the loop and pass
2461 a reference to the value. */
2462 gfc_conv_expr (&se, expr);
2463 }
2464 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2465 gfc_add_block_to_block (&outer_loop->post, &se.post);
2466 if (gfc_is_class_scalar_expr (expr))
2467 /* This is necessary because the dynamic type will always be
2468 larger than the declared type. In consequence, assigning
2469 the value to a temporary could segfault.
2470 OOP-TODO: see if this is generally correct or if the value
2471 has to be written to an allocated temporary, whose address
2472 is passed via ss_info. */
2473 ss_info->data.scalar.value = se.expr;
2474 else
2475 ss_info->data.scalar.value = gfc_evaluate_now (se.expr,
2476 &outer_loop->pre);
2477
2478 ss_info->string_length = se.string_length;
2479 break;
2480
2481 case GFC_SS_SECTION:
2482 /* Add the expressions for scalar and vector subscripts. */
2483 for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
2484 if (info->subscript[n])
2485 {
2486 gfc_add_loop_ss_code (loop, info->subscript[n], true, where);
2487 /* The recursive call will have taken care of the nested loops.
2488 No need to do it twice. */
2489 skip_nested = true;
2490 }
2491
2492 set_vector_loop_bounds (ss);
2493 break;
2494
2495 case GFC_SS_VECTOR:
2496 /* Get the vector's descriptor and store it in SS. */
2497 gfc_init_se (&se, NULL);
2498 gfc_conv_expr_descriptor (&se, expr, gfc_walk_expr (expr));
2499 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2500 gfc_add_block_to_block (&outer_loop->post, &se.post);
2501 info->descriptor = se.expr;
2502 break;
2503
2504 case GFC_SS_INTRINSIC:
2505 gfc_add_intrinsic_ss_code (loop, ss);
2506 break;
2507
2508 case GFC_SS_FUNCTION:
2509 /* Array function return value. We call the function and save its
2510 result in a temporary for use inside the loop. */
2511 gfc_init_se (&se, NULL);
2512 se.loop = loop;
2513 se.ss = ss;
2514 gfc_conv_expr (&se, expr);
2515 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2516 gfc_add_block_to_block (&outer_loop->post, &se.post);
2517 ss_info->string_length = se.string_length;
2518 break;
2519
2520 case GFC_SS_CONSTRUCTOR:
2521 if (expr->ts.type == BT_CHARACTER
2522 && ss_info->string_length == NULL
2523 && expr->ts.u.cl
2524 && expr->ts.u.cl->length)
2525 {
2526 gfc_init_se (&se, NULL);
2527 gfc_conv_expr_type (&se, expr->ts.u.cl->length,
2528 gfc_charlen_type_node);
2529 ss_info->string_length = se.expr;
2530 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2531 gfc_add_block_to_block (&outer_loop->post, &se.post);
2532 }
2533 trans_array_constructor (ss, where);
2534 break;
2535
2536 case GFC_SS_TEMP:
2537 case GFC_SS_COMPONENT:
2538 /* Do nothing. These are handled elsewhere. */
2539 break;
2540
2541 default:
2542 gcc_unreachable ();
2543 }
2544 }
2545
2546 if (!skip_nested)
2547 for (nested_loop = loop->nested; nested_loop;
2548 nested_loop = nested_loop->next)
2549 gfc_add_loop_ss_code (nested_loop, nested_loop->ss, subscript, where);
2550 }
2551
2552
2553 /* Translate expressions for the descriptor and data pointer of a SS. */
2554 /*GCC ARRAYS*/
2555
2556 static void
2557 gfc_conv_ss_descriptor (stmtblock_t * block, gfc_ss * ss, int base)
2558 {
2559 gfc_se se;
2560 gfc_ss_info *ss_info;
2561 gfc_array_info *info;
2562 tree tmp;
2563
2564 ss_info = ss->info;
2565 info = &ss_info->data.array;
2566
2567 /* Get the descriptor for the array to be scalarized. */
2568 gcc_assert (ss_info->expr->expr_type == EXPR_VARIABLE);
2569 gfc_init_se (&se, NULL);
2570 se.descriptor_only = 1;
2571 gfc_conv_expr_lhs (&se, ss_info->expr);
2572 gfc_add_block_to_block (block, &se.pre);
2573 info->descriptor = se.expr;
2574 ss_info->string_length = se.string_length;
2575
2576 if (base)
2577 {
2578 /* Also the data pointer. */
2579 tmp = gfc_conv_array_data (se.expr);
2580 /* If this is a variable or address of a variable we use it directly.
2581 Otherwise we must evaluate it now to avoid breaking dependency
2582 analysis by pulling the expressions for elemental array indices
2583 inside the loop. */
2584 if (!(DECL_P (tmp)
2585 || (TREE_CODE (tmp) == ADDR_EXPR
2586 && DECL_P (TREE_OPERAND (tmp, 0)))))
2587 tmp = gfc_evaluate_now (tmp, block);
2588 info->data = tmp;
2589
2590 tmp = gfc_conv_array_offset (se.expr);
2591 info->offset = gfc_evaluate_now (tmp, block);
2592
2593 /* Make absolutely sure that the saved_offset is indeed saved
2594 so that the variable is still accessible after the loops
2595 are translated. */
2596 info->saved_offset = info->offset;
2597 }
2598 }
2599
2600
2601 /* Initialize a gfc_loopinfo structure. */
2602
2603 void
2604 gfc_init_loopinfo (gfc_loopinfo * loop)
2605 {
2606 int n;
2607
2608 memset (loop, 0, sizeof (gfc_loopinfo));
2609 gfc_init_block (&loop->pre);
2610 gfc_init_block (&loop->post);
2611
2612 /* Initially scalarize in order and default to no loop reversal. */
2613 for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
2614 {
2615 loop->order[n] = n;
2616 loop->reverse[n] = GFC_INHIBIT_REVERSE;
2617 }
2618
2619 loop->ss = gfc_ss_terminator;
2620 }
2621
2622
2623 /* Copies the loop variable info to a gfc_se structure. Does not copy the SS
2624 chain. */
2625
2626 void
2627 gfc_copy_loopinfo_to_se (gfc_se * se, gfc_loopinfo * loop)
2628 {
2629 se->loop = loop;
2630 }
2631
2632
2633 /* Return an expression for the data pointer of an array. */
2634
2635 tree
2636 gfc_conv_array_data (tree descriptor)
2637 {
2638 tree type;
2639
2640 type = TREE_TYPE (descriptor);
2641 if (GFC_ARRAY_TYPE_P (type))
2642 {
2643 if (TREE_CODE (type) == POINTER_TYPE)
2644 return descriptor;
2645 else
2646 {
2647 /* Descriptorless arrays. */
2648 return gfc_build_addr_expr (NULL_TREE, descriptor);
2649 }
2650 }
2651 else
2652 return gfc_conv_descriptor_data_get (descriptor);
2653 }
2654
2655
2656 /* Return an expression for the base offset of an array. */
2657
2658 tree
2659 gfc_conv_array_offset (tree descriptor)
2660 {
2661 tree type;
2662
2663 type = TREE_TYPE (descriptor);
2664 if (GFC_ARRAY_TYPE_P (type))
2665 return GFC_TYPE_ARRAY_OFFSET (type);
2666 else
2667 return gfc_conv_descriptor_offset_get (descriptor);
2668 }
2669
2670
2671 /* Get an expression for the array stride. */
2672
2673 tree
2674 gfc_conv_array_stride (tree descriptor, int dim)
2675 {
2676 tree tmp;
2677 tree type;
2678
2679 type = TREE_TYPE (descriptor);
2680
2681 /* For descriptorless arrays use the array size. */
2682 tmp = GFC_TYPE_ARRAY_STRIDE (type, dim);
2683 if (tmp != NULL_TREE)
2684 return tmp;
2685
2686 tmp = gfc_conv_descriptor_stride_get (descriptor, gfc_rank_cst[dim]);
2687 return tmp;
2688 }
2689
2690
2691 /* Like gfc_conv_array_stride, but for the lower bound. */
2692
2693 tree
2694 gfc_conv_array_lbound (tree descriptor, int dim)
2695 {
2696 tree tmp;
2697 tree type;
2698
2699 type = TREE_TYPE (descriptor);
2700
2701 tmp = GFC_TYPE_ARRAY_LBOUND (type, dim);
2702 if (tmp != NULL_TREE)
2703 return tmp;
2704
2705 tmp = gfc_conv_descriptor_lbound_get (descriptor, gfc_rank_cst[dim]);
2706 return tmp;
2707 }
2708
2709
2710 /* Like gfc_conv_array_stride, but for the upper bound. */
2711
2712 tree
2713 gfc_conv_array_ubound (tree descriptor, int dim)
2714 {
2715 tree tmp;
2716 tree type;
2717
2718 type = TREE_TYPE (descriptor);
2719
2720 tmp = GFC_TYPE_ARRAY_UBOUND (type, dim);
2721 if (tmp != NULL_TREE)
2722 return tmp;
2723
2724 /* This should only ever happen when passing an assumed shape array
2725 as an actual parameter. The value will never be used. */
2726 if (GFC_ARRAY_TYPE_P (TREE_TYPE (descriptor)))
2727 return gfc_index_zero_node;
2728
2729 tmp = gfc_conv_descriptor_ubound_get (descriptor, gfc_rank_cst[dim]);
2730 return tmp;
2731 }
2732
2733
2734 /* Generate code to perform an array index bound check. */
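/* A sketch of the emitted code (message text abbreviated):

       if (index < lbound)  runtime_error (...);
       if (check_upper && index > ubound)  runtime_error (...);

   The upper bound is not checked for the last dimension of an assumed-size
   array (see the callers).  */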
2735
2736 static tree
2737 trans_array_bound_check (gfc_se * se, gfc_ss *ss, tree index, int n,
2738 locus * where, bool check_upper)
2739 {
2740 tree fault;
2741 tree tmp_lo, tmp_up;
2742 tree descriptor;
2743 char *msg;
2744 const char * name = NULL;
2745
2746 if (!(gfc_option.rtcheck & GFC_RTCHECK_BOUNDS))
2747 return index;
2748
2749 descriptor = ss->info->data.array.descriptor;
2750
2751 index = gfc_evaluate_now (index, &se->pre);
2752
2753 /* We find a name for the error message. */
2754 name = ss->info->expr->symtree->n.sym->name;
2755 gcc_assert (name != NULL);
2756
2757 if (TREE_CODE (descriptor) == VAR_DECL)
2758 name = IDENTIFIER_POINTER (DECL_NAME (descriptor));
2759
2760 /* If upper bound is present, include both bounds in the error message. */
2761 if (check_upper)
2762 {
2763 tmp_lo = gfc_conv_array_lbound (descriptor, n);
2764 tmp_up = gfc_conv_array_ubound (descriptor, n);
2765
2766 if (name)
2767 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
2768 "outside of expected range (%%ld:%%ld)", n+1, name);
2769 else
2770 asprintf (&msg, "Index '%%ld' of dimension %d "
2771 "outside of expected range (%%ld:%%ld)", n+1);
2772
2773 fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
2774 index, tmp_lo);
2775 gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
2776 fold_convert (long_integer_type_node, index),
2777 fold_convert (long_integer_type_node, tmp_lo),
2778 fold_convert (long_integer_type_node, tmp_up));
2779 fault = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
2780 index, tmp_up);
2781 gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
2782 fold_convert (long_integer_type_node, index),
2783 fold_convert (long_integer_type_node, tmp_lo),
2784 fold_convert (long_integer_type_node, tmp_up));
2785 free (msg);
2786 }
2787 else
2788 {
2789 tmp_lo = gfc_conv_array_lbound (descriptor, n);
2790
2791 if (name)
2792 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
2793 "below lower bound of %%ld", n+1, name);
2794 else
2795 asprintf (&msg, "Index '%%ld' of dimension %d "
2796 "below lower bound of %%ld", n+1);
2797
2798 fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
2799 index, tmp_lo);
2800 gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
2801 fold_convert (long_integer_type_node, index),
2802 fold_convert (long_integer_type_node, tmp_lo));
2803 free (msg);
2804 }
2805
2806 return index;
2807 }
2808
2809
2810 /* Return the offset for an index. Performs bound checking for elemental
2811 dimensions. Single element references are processed separately.
2812 DIM is the array dimension, I is the loop dimension. */
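/* For a scalarized DIMEN_RANGE dimension the result is essentially

       (loopvar * info->stride[dim] + info->delta[dim]) * stride

   with the multiplications omitted when a factor is known to be one and the
   addition omitted when the delta is zero, as the code below shows.  */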
2813
2814 static tree
2815 conv_array_index_offset (gfc_se * se, gfc_ss * ss, int dim, int i,
2816 gfc_array_ref * ar, tree stride)
2817 {
2818 gfc_array_info *info;
2819 tree index;
2820 tree desc;
2821 tree data;
2822
2823 info = &ss->info->data.array;
2824
2825 /* Get the index into the array for this dimension. */
2826 if (ar)
2827 {
2828 gcc_assert (ar->type != AR_ELEMENT);
2829 switch (ar->dimen_type[dim])
2830 {
2831 case DIMEN_THIS_IMAGE:
2832 gcc_unreachable ();
2833 break;
2834 case DIMEN_ELEMENT:
2835 /* Elemental dimension. */
2836 gcc_assert (info->subscript[dim]
2837 && info->subscript[dim]->info->type == GFC_SS_SCALAR);
2838 /* We've already translated this value outside the loop. */
2839 index = info->subscript[dim]->info->data.scalar.value;
2840
2841 index = trans_array_bound_check (se, ss, index, dim, &ar->where,
2842 ar->as->type != AS_ASSUMED_SIZE
2843 || dim < ar->dimen - 1);
2844 break;
2845
2846 case DIMEN_VECTOR:
2847 gcc_assert (info && se->loop);
2848 gcc_assert (info->subscript[dim]
2849 && info->subscript[dim]->info->type == GFC_SS_VECTOR);
2850 desc = info->subscript[dim]->info->data.array.descriptor;
2851
2852 /* Get a zero-based index into the vector. */
2853 index = fold_build2_loc (input_location, MINUS_EXPR,
2854 gfc_array_index_type,
2855 se->loop->loopvar[i], se->loop->from[i]);
2856
2857 /* Multiply the index by the stride. */
2858 index = fold_build2_loc (input_location, MULT_EXPR,
2859 gfc_array_index_type,
2860 index, gfc_conv_array_stride (desc, 0));
2861
2862 /* Read the vector to get an index into info->descriptor. */
2863 data = build_fold_indirect_ref_loc (input_location,
2864 gfc_conv_array_data (desc));
2865 index = gfc_build_array_ref (data, index, NULL);
2866 index = gfc_evaluate_now (index, &se->pre);
2867 index = fold_convert (gfc_array_index_type, index);
2868
2869 /* Do any bounds checking on the final info->descriptor index. */
2870 index = trans_array_bound_check (se, ss, index, dim, &ar->where,
2871 ar->as->type != AS_ASSUMED_SIZE
2872 || dim < ar->dimen - 1);
2873 break;
2874
2875 case DIMEN_RANGE:
2876 /* Scalarized dimension. */
2877 gcc_assert (info && se->loop);
2878
2879 /* Multiply the loop variable by the stride and delta. */
2880 index = se->loop->loopvar[i];
2881 if (!integer_onep (info->stride[dim]))
2882 index = fold_build2_loc (input_location, MULT_EXPR,
2883 gfc_array_index_type, index,
2884 info->stride[dim]);
2885 if (!integer_zerop (info->delta[dim]))
2886 index = fold_build2_loc (input_location, PLUS_EXPR,
2887 gfc_array_index_type, index,
2888 info->delta[dim]);
2889 break;
2890
2891 default:
2892 gcc_unreachable ();
2893 }
2894 }
2895 else
2896 {
2897 /* Temporary array or derived type component. */
2898 gcc_assert (se->loop);
2899 index = se->loop->loopvar[se->loop->order[i]];
2900
2901 /* Pointer functions can have stride[0] different from unity.
2902 Use the stride returned by the function call and stored in
2903 the descriptor for the temporary. */
2904 if (se->ss && se->ss->info->type == GFC_SS_FUNCTION
2905 && se->ss->info->expr
2906 && se->ss->info->expr->symtree
2907 && se->ss->info->expr->symtree->n.sym->result
2908 && se->ss->info->expr->symtree->n.sym->result->attr.pointer)
2909 stride = gfc_conv_descriptor_stride_get (info->descriptor,
2910 gfc_rank_cst[dim]);
2911
2912 if (!integer_zerop (info->delta[dim]))
2913 index = fold_build2_loc (input_location, PLUS_EXPR,
2914 gfc_array_index_type, index, info->delta[dim]);
2915 }
2916
2917 /* Multiply by the stride. */
2918 if (!integer_onep (stride))
2919 index = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
2920 index, stride);
2921
2922 return index;
2923 }
2924
2925
2926 /* Build a scalarized array reference using the vptr 'size'. */
2927
2928 static bool
2929 build_class_array_ref (gfc_se *se, tree base, tree index)
2930 {
2931 tree type;
2932 tree size;
2933 tree offset;
2934 tree decl;
2935 tree tmp;
2936 gfc_expr *expr = se->ss->info->expr;
2937 gfc_ref *ref;
2938 gfc_ref *class_ref;
2939 gfc_typespec *ts;
2940
2941 if (expr == NULL || expr->ts.type != BT_CLASS)
2942 return false;
2943
2944 if (expr->symtree && expr->symtree->n.sym->ts.type == BT_CLASS)
2945 ts = &expr->symtree->n.sym->ts;
2946 else
2947 ts = NULL;
2948 class_ref = NULL;
2949
2950 for (ref = expr->ref; ref; ref = ref->next)
2951 {
2952 if (ref->type == REF_COMPONENT
2953 && ref->u.c.component->ts.type == BT_CLASS
2954 && ref->next && ref->next->type == REF_COMPONENT
2955 && strcmp (ref->next->u.c.component->name, "_data") == 0
2956 && ref->next->next
2957 && ref->next->next->type == REF_ARRAY
2958 && ref->next->next->u.ar.type != AR_ELEMENT)
2959 {
2960 ts = &ref->u.c.component->ts;
2961 class_ref = ref;
2962 break;
2963 }
2964 }
2965
2966 if (ts == NULL)
2967 return false;
2968
2969 if (class_ref == NULL)
2970 decl = expr->symtree->n.sym->backend_decl;
2971 else
2972 {
2973 /* Remove everything after the last class reference, convert the
2974 expression and then recover its tail end once more. */
2975 gfc_se tmpse;
2976 ref = class_ref->next;
2977 class_ref->next = NULL;
2978 gfc_init_se (&tmpse, NULL);
2979 gfc_conv_expr (&tmpse, expr);
2980 decl = tmpse.expr;
2981 class_ref->next = ref;
2982 }
2983
2984 size = gfc_vtable_size_get (decl);
2985
2986 /* Build the address of the element. */
2987 type = TREE_TYPE (TREE_TYPE (base));
2988 size = fold_convert (TREE_TYPE (index), size);
2989 offset = fold_build2_loc (input_location, MULT_EXPR,
2990 gfc_array_index_type,
2991 index, size);
2992 tmp = gfc_build_addr_expr (pvoid_type_node, base);
2993 tmp = fold_build_pointer_plus_loc (input_location, tmp, offset);
2994 tmp = fold_convert (build_pointer_type (type), tmp);
2995
2996 /* Return the element in the se expression. */
2997 se->expr = build_fold_indirect_ref_loc (input_location, tmp);
2998 return true;
2999 }
3000
3001
3002 /* Build a scalarized reference to an array. */
3003
3004 static void
3005 gfc_conv_scalarized_array_ref (gfc_se * se, gfc_array_ref * ar)
3006 {
3007 gfc_array_info *info;
3008 tree decl = NULL_TREE;
3009 tree index;
3010 tree tmp;
3011 gfc_ss *ss;
3012 gfc_expr *expr;
3013 int n;
3014
3015 ss = se->ss;
3016 expr = ss->info->expr;
3017 info = &ss->info->data.array;
3018 if (ar)
3019 n = se->loop->order[0];
3020 else
3021 n = 0;
3022
3023 index = conv_array_index_offset (se, ss, ss->dim[n], n, ar, info->stride0);
3024 /* Add the offset for this dimension to the stored offset for all other
3025 dimensions. */
3026 if (!integer_zerop (info->offset))
3027 index = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
3028 index, info->offset);
3029
3030 if (expr && is_subref_array (expr))
3031 decl = expr->symtree->n.sym->backend_decl;
3032
3033 tmp = build_fold_indirect_ref_loc (input_location, info->data);
3034
3035 /* Use the vptr 'size' field to access the element of a class
3036 array. */
3037 if (build_class_array_ref (se, tmp, index))
3038 return;
3039
3040 se->expr = gfc_build_array_ref (tmp, index, decl);
3041 }
3042
3043
3044 /* Translate an access to a temporary array. */
3045
3046 void
3047 gfc_conv_tmp_array_ref (gfc_se * se)
3048 {
3049 se->string_length = se->ss->info->string_length;
3050 gfc_conv_scalarized_array_ref (se, NULL);
3051 gfc_advance_se_ss_chain (se);
3052 }
3053
3054 /* Add T to the offset pair *OFFSET, *CST_OFFSET. */
3055
3056 static void
3057 add_to_offset (tree *cst_offset, tree *offset, tree t)
3058 {
3059 if (TREE_CODE (t) == INTEGER_CST)
3060 *cst_offset = int_const_binop (PLUS_EXPR, *cst_offset, t);
3061 else
3062 {
3063 if (!integer_zerop (*offset))
3064 *offset = fold_build2_loc (input_location, PLUS_EXPR,
3065 gfc_array_index_type, *offset, t);
3066 else
3067 *offset = t;
3068 }
3069 }
3070
3071 /* Build an array reference. se->expr already holds the array descriptor.
3072 This should be either a variable, indirect variable reference or component
3073 reference. For arrays which do not have a descriptor, se->expr will be
3074 the data pointer.
3075 a(i, j, k) = base[offset + i * stride[0] + j * stride[1] + k * stride[2]]*/
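/* A worked example, assuming the usual descriptor conventions: for
   REAL :: A(10, 20)  stored contiguously, stride[0] is 1 and stride[1] is
   10, the descriptor offset is -(1*1 + 1*10) = -11, and the reference
   A(5, 3) becomes  base[-11 + 5*1 + 3*10] = base[24],  the 25th element in
   array element order.  */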
3076
3077 void
3078 gfc_conv_array_ref (gfc_se * se, gfc_array_ref * ar, gfc_symbol * sym,
3079 locus * where)
3080 {
3081 int n;
3082 tree offset, cst_offset;
3083 tree tmp;
3084 tree stride;
3085 gfc_se indexse;
3086 gfc_se tmpse;
3087
3088 if (ar->dimen == 0)
3089 {
3090 gcc_assert (ar->codimen);
3091
3092 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (se->expr)))
3093 se->expr = build_fold_indirect_ref (gfc_conv_array_data (se->expr));
3094 else
3095 {
3096 if (GFC_ARRAY_TYPE_P (TREE_TYPE (se->expr))
3097 && TREE_CODE (TREE_TYPE (se->expr)) == POINTER_TYPE)
3098 se->expr = build_fold_indirect_ref_loc (input_location, se->expr);
3099
3100 /* Use the actual tree type and not the wrapped coarray. */
3101 if (!se->want_pointer)
3102 se->expr = fold_convert (TYPE_MAIN_VARIANT (TREE_TYPE (se->expr)),
3103 se->expr);
3104 }
3105
3106 return;
3107 }
3108
3109 /* Handle scalarized references separately. */
3110 if (ar->type != AR_ELEMENT)
3111 {
3112 gfc_conv_scalarized_array_ref (se, ar);
3113 gfc_advance_se_ss_chain (se);
3114 return;
3115 }
3116
3117 cst_offset = offset = gfc_index_zero_node;
3118 add_to_offset (&cst_offset, &offset, gfc_conv_array_offset (se->expr));
3119
3120 /* Calculate the offsets from all the dimensions. Make sure to associate
3121 the final offset so that we form a chain of loop invariant summands. */
3122 for (n = ar->dimen - 1; n >= 0; n--)
3123 {
3124 /* Calculate the index for this dimension. */
3125 gfc_init_se (&indexse, se);
3126 gfc_conv_expr_type (&indexse, ar->start[n], gfc_array_index_type);
3127 gfc_add_block_to_block (&se->pre, &indexse.pre);
3128
3129 if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
3130 {
3131 /* Check array bounds. */
3132 tree cond;
3133 char *msg;
3134
3135 /* Evaluate the indexse.expr only once. */
3136 indexse.expr = save_expr (indexse.expr);
3137
3138 /* Lower bound. */
3139 tmp = gfc_conv_array_lbound (se->expr, n);
3140 if (sym->attr.temporary)
3141 {
3142 gfc_init_se (&tmpse, se);
3143 gfc_conv_expr_type (&tmpse, ar->as->lower[n],
3144 gfc_array_index_type);
3145 gfc_add_block_to_block (&se->pre, &tmpse.pre);
3146 tmp = tmpse.expr;
3147 }
3148
3149 cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
3150 indexse.expr, tmp);
3151 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3152 "below lower bound of %%ld", n+1, sym->name);
3153 gfc_trans_runtime_check (true, false, cond, &se->pre, where, msg,
3154 fold_convert (long_integer_type_node,
3155 indexse.expr),
3156 fold_convert (long_integer_type_node, tmp));
3157 free (msg);
3158
3159 /* Upper bound, but not for the last dimension of assumed-size
3160 arrays. */
3161 if (n < ar->dimen - 1 || ar->as->type != AS_ASSUMED_SIZE)
3162 {
3163 tmp = gfc_conv_array_ubound (se->expr, n);
3164 if (sym->attr.temporary)
3165 {
3166 gfc_init_se (&tmpse, se);
3167 gfc_conv_expr_type (&tmpse, ar->as->upper[n],
3168 gfc_array_index_type);
3169 gfc_add_block_to_block (&se->pre, &tmpse.pre);
3170 tmp = tmpse.expr;
3171 }
3172
3173 cond = fold_build2_loc (input_location, GT_EXPR,
3174 boolean_type_node, indexse.expr, tmp);
3175 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3176 "above upper bound of %%ld", n+1, sym->name);
3177 gfc_trans_runtime_check (true, false, cond, &se->pre, where, msg,
3178 fold_convert (long_integer_type_node,
3179 indexse.expr),
3180 fold_convert (long_integer_type_node, tmp));
3181 free (msg);
3182 }
3183 }
3184
3185 /* Multiply the index by the stride. */
3186 stride = gfc_conv_array_stride (se->expr, n);
3187 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
3188 indexse.expr, stride);
3189
3190 /* And add it to the total. */
3191 add_to_offset (&cst_offset, &offset, tmp);
3192 }
3193
3194 if (!integer_zerop (cst_offset))
3195 offset = fold_build2_loc (input_location, PLUS_EXPR,
3196 gfc_array_index_type, offset, cst_offset);
3197
3198 /* Access the calculated element. */
3199 tmp = gfc_conv_array_data (se->expr);
3200 tmp = build_fold_indirect_ref (tmp);
3201 se->expr = gfc_build_array_ref (tmp, offset, sym->backend_decl);
3202 }
3203
3204
3205 /* Add the offset corresponding to array's ARRAY_DIM dimension and loop's
3206 LOOP_DIM dimension (if any) to array's offset. */
3207
3208 static void
3209 add_array_offset (stmtblock_t *pblock, gfc_loopinfo *loop, gfc_ss *ss,
3210 gfc_array_ref *ar, int array_dim, int loop_dim)
3211 {
3212 gfc_se se;
3213 gfc_array_info *info;
3214 tree stride, index;
3215
3216 info = &ss->info->data.array;
3217
3218 gfc_init_se (&se, NULL);
3219 se.loop = loop;
3220 se.expr = info->descriptor;
3221 stride = gfc_conv_array_stride (info->descriptor, array_dim);
3222 index = conv_array_index_offset (&se, ss, array_dim, loop_dim, ar, stride);
3223 gfc_add_block_to_block (pblock, &se.pre);
3224
3225 info->offset = fold_build2_loc (input_location, PLUS_EXPR,
3226 gfc_array_index_type,
3227 info->offset, index);
3228 info->offset = gfc_evaluate_now (info->offset, pblock);
3229 }
3230
3231
3232 /* Generate the code to be executed immediately before entering a
3233 scalarization loop. */
3234
3235 static void
3236 gfc_trans_preloop_setup (gfc_loopinfo * loop, int dim, int flag,
3237 stmtblock_t * pblock)
3238 {
3239 tree stride;
3240 gfc_ss_info *ss_info;
3241 gfc_array_info *info;
3242 gfc_ss_type ss_type;
3243 gfc_ss *ss, *pss;
3244 gfc_loopinfo *ploop;
3245 gfc_array_ref *ar;
3246 int i;
3247
3248 /* This code will be executed before entering the scalarization loop
3249 for this dimension. */
3250 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3251 {
3252 ss_info = ss->info;
3253
3254 if ((ss_info->useflags & flag) == 0)
3255 continue;
3256
3257 ss_type = ss_info->type;
3258 if (ss_type != GFC_SS_SECTION
3259 && ss_type != GFC_SS_FUNCTION
3260 && ss_type != GFC_SS_CONSTRUCTOR
3261 && ss_type != GFC_SS_COMPONENT)
3262 continue;
3263
3264 info = &ss_info->data.array;
3265
3266 gcc_assert (dim < ss->dimen);
3267 gcc_assert (ss->dimen == loop->dimen);
3268
3269 if (info->ref)
3270 ar = &info->ref->u.ar;
3271 else
3272 ar = NULL;
3273
3274 if (dim == loop->dimen - 1 && loop->parent != NULL)
3275 {
3276 /* If we are in the outermost dimension of this loop, the previous
3277 dimension shall be in the parent loop. */
3278 gcc_assert (ss->parent != NULL);
3279
3280 pss = ss->parent;
3281 ploop = loop->parent;
3282
3283 /* ss and ss->parent refer to the same array. */
3284 gcc_assert (ss_info == pss->info);
3285 }
3286 else
3287 {
3288 ploop = loop;
3289 pss = ss;
3290 }
3291
3292 if (dim == loop->dimen - 1)
3293 i = 0;
3294 else
3295 i = dim + 1;
3296
3297 /* For the time being, there is no loop reordering. */
3298 gcc_assert (i == ploop->order[i]);
3299 i = ploop->order[i];
3300
3301 if (dim == loop->dimen - 1 && loop->parent == NULL)
3302 {
3303 stride = gfc_conv_array_stride (info->descriptor,
3304 innermost_ss (ss)->dim[i]);
3305
3306 /* Calculate the stride of the innermost loop. Hopefully this will
3307 allow the backend optimizers to do their stuff more effectively.
3308 */
3309 info->stride0 = gfc_evaluate_now (stride, pblock);
3310
3311 /* For the outermost loop calculate the offset due to any
3312 elemental dimensions. It will have been initialized with the
3313 base offset of the array. */
3314 if (info->ref)
3315 {
3316 for (i = 0; i < ar->dimen; i++)
3317 {
3318 if (ar->dimen_type[i] != DIMEN_ELEMENT)
3319 continue;
3320
3321 add_array_offset (pblock, loop, ss, ar, i, /* unused */ -1);
3322 }
3323 }
3324 }
3325 else
3326 /* Add the offset for the previous loop dimension. */
3327 add_array_offset (pblock, ploop, ss, ar, pss->dim[i], i);
3328
3329 /* Remember this offset for the second loop. */
3330 if (dim == loop->temp_dim - 1 && loop->parent == NULL)
3331 info->saved_offset = info->offset;
3332 }
3333 }
3334
3335
3336 /* Start a scalarized expression. Creates a scope and declares loop
3337 variables. */
3338
3339 void
3340 gfc_start_scalarized_body (gfc_loopinfo * loop, stmtblock_t * pbody)
3341 {
3342 int dim;
3343 int n;
3344 int flags;
3345
3346 gcc_assert (!loop->array_parameter);
3347
3348 for (dim = loop->dimen - 1; dim >= 0; dim--)
3349 {
3350 n = loop->order[dim];
3351
3352 gfc_start_block (&loop->code[n]);
3353
3354 /* Create the loop variable. */
3355 loop->loopvar[n] = gfc_create_var (gfc_array_index_type, "S");
3356
3357 if (dim < loop->temp_dim)
3358 flags = 3;
3359 else
3360 flags = 1;
3361 /* Calculate values that will be constant within this loop. */
3362 gfc_trans_preloop_setup (loop, dim, flags, &loop->code[n]);
3363 }
3364 gfc_start_block (pbody);
3365 }
3366
3367
3368 /* Generates the actual loop code for a scalarization loop. */
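/* In the non-OpenMP case the generated structure is roughly

       S = from;
       loop:
         if (S > to) goto exit;   (LT_EXPR when the loop is reversed)
         <body>
         S = S + 1;               (MINUS_EXPR when the loop is reversed)
         goto loop;
       exit:

   built from a LOOP_EXPR plus an explicit exit label.  */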
3369
3370 void
3371 gfc_trans_scalarized_loop_end (gfc_loopinfo * loop, int n,
3372 stmtblock_t * pbody)
3373 {
3374 stmtblock_t block;
3375 tree cond;
3376 tree tmp;
3377 tree loopbody;
3378 tree exit_label;
3379 tree stmt;
3380 tree init;
3381 tree incr;
3382
3383 if ((ompws_flags & (OMPWS_WORKSHARE_FLAG | OMPWS_SCALARIZER_WS))
3384 == (OMPWS_WORKSHARE_FLAG | OMPWS_SCALARIZER_WS)
3385 && n == loop->dimen - 1)
3386 {
3387 /* We create an OMP_FOR construct for the outermost scalarized loop. */
3388 init = make_tree_vec (1);
3389 cond = make_tree_vec (1);
3390 incr = make_tree_vec (1);
3391
3392 /* The CYCLE statement is implemented with a goto. An EXIT statement
3393 must not be present in this loop. */
3394 exit_label = gfc_build_label_decl (NULL_TREE);
3395 TREE_USED (exit_label) = 1;
3396
3397 /* Label for cycle statements (if needed). */
3398 tmp = build1_v (LABEL_EXPR, exit_label);
3399 gfc_add_expr_to_block (pbody, tmp);
3400
3401 stmt = make_node (OMP_FOR);
3402
3403 TREE_TYPE (stmt) = void_type_node;
3404 OMP_FOR_BODY (stmt) = loopbody = gfc_finish_block (pbody);
3405
3406 OMP_FOR_CLAUSES (stmt) = build_omp_clause (input_location,
3407 OMP_CLAUSE_SCHEDULE);
3408 OMP_CLAUSE_SCHEDULE_KIND (OMP_FOR_CLAUSES (stmt))
3409 = OMP_CLAUSE_SCHEDULE_STATIC;
3410 if (ompws_flags & OMPWS_NOWAIT)
3411 OMP_CLAUSE_CHAIN (OMP_FOR_CLAUSES (stmt))
3412 = build_omp_clause (input_location, OMP_CLAUSE_NOWAIT);
3413
3414 /* Initialize the loopvar. */
3415 TREE_VEC_ELT (init, 0) = build2_v (MODIFY_EXPR, loop->loopvar[n],
3416 loop->from[n]);
3417 OMP_FOR_INIT (stmt) = init;
3418 /* The exit condition. */
3419 TREE_VEC_ELT (cond, 0) = build2_loc (input_location, LE_EXPR,
3420 boolean_type_node,
3421 loop->loopvar[n], loop->to[n]);
3422 SET_EXPR_LOCATION (TREE_VEC_ELT (cond, 0), input_location);
3423 OMP_FOR_COND (stmt) = cond;
3424 /* Increment the loopvar. */
3425 tmp = build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
3426 loop->loopvar[n], gfc_index_one_node);
3427 TREE_VEC_ELT (incr, 0) = fold_build2_loc (input_location, MODIFY_EXPR,
3428 void_type_node, loop->loopvar[n], tmp);
3429 OMP_FOR_INCR (stmt) = incr;
3430
3431 ompws_flags &= ~OMPWS_CURR_SINGLEUNIT;
3432 gfc_add_expr_to_block (&loop->code[n], stmt);
3433 }
3434 else
3435 {
3436 bool reverse_loop = (loop->reverse[n] == GFC_REVERSE_SET)
3437 && (loop->temp_ss == NULL);
3438
3439 loopbody = gfc_finish_block (pbody);
3440
3441 if (reverse_loop)
3442 {
3443 tmp = loop->from[n];
3444 loop->from[n] = loop->to[n];
3445 loop->to[n] = tmp;
3446 }
3447
3448 /* Initialize the loopvar. */
3449 if (loop->loopvar[n] != loop->from[n])
3450 gfc_add_modify (&loop->code[n], loop->loopvar[n], loop->from[n]);
3451
3452 exit_label = gfc_build_label_decl (NULL_TREE);
3453
3454 /* Generate the loop body. */
3455 gfc_init_block (&block);
3456
3457 /* The exit condition. */
3458 cond = fold_build2_loc (input_location, reverse_loop ? LT_EXPR : GT_EXPR,
3459 boolean_type_node, loop->loopvar[n], loop->to[n]);
3460 tmp = build1_v (GOTO_EXPR, exit_label);
3461 TREE_USED (exit_label) = 1;
3462 tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location));
3463 gfc_add_expr_to_block (&block, tmp);
3464
3465 /* The main body. */
3466 gfc_add_expr_to_block (&block, loopbody);
3467
3468 /* Increment the loopvar. */
3469 tmp = fold_build2_loc (input_location,
3470 reverse_loop ? MINUS_EXPR : PLUS_EXPR,
3471 gfc_array_index_type, loop->loopvar[n],
3472 gfc_index_one_node);
3473
3474 gfc_add_modify (&block, loop->loopvar[n], tmp);
3475
3476 /* Build the loop. */
3477 tmp = gfc_finish_block (&block);
3478 tmp = build1_v (LOOP_EXPR, tmp);
3479 gfc_add_expr_to_block (&loop->code[n], tmp);
3480
3481 /* Add the exit label. */
3482 tmp = build1_v (LABEL_EXPR, exit_label);
3483 gfc_add_expr_to_block (&loop->code[n], tmp);
3484 }
3485
3486 }
3487
3488
3489 /* Finishes and generates the loops for a scalarized expression. */
3490
3491 void
3492 gfc_trans_scalarizing_loops (gfc_loopinfo * loop, stmtblock_t * body)
3493 {
3494 int dim;
3495 int n;
3496 gfc_ss *ss;
3497 stmtblock_t *pblock;
3498 tree tmp;
3499
3500 pblock = body;
3501 /* Generate the loops. */
3502 for (dim = 0; dim < loop->dimen; dim++)
3503 {
3504 n = loop->order[dim];
3505 gfc_trans_scalarized_loop_end (loop, n, pblock);
3506 loop->loopvar[n] = NULL_TREE;
3507 pblock = &loop->code[n];
3508 }
3509
3510 tmp = gfc_finish_block (pblock);
3511 gfc_add_expr_to_block (&loop->pre, tmp);
3512
3513 /* Clear all the used flags. */
3514 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3515 if (ss->parent == NULL)
3516 ss->info->useflags = 0;
3517 }
3518
3519
3520 /* Finish the main body of a scalarized expression, and start the secondary
3521 copying body. */
3522
3523 void
3524 gfc_trans_scalarized_loop_boundary (gfc_loopinfo * loop, stmtblock_t * body)
3525 {
3526 int dim;
3527 int n;
3528 stmtblock_t *pblock;
3529 gfc_ss *ss;
3530
3531 pblock = body;
3532 /* We finish as many loops as are used by the temporary. */
3533 for (dim = 0; dim < loop->temp_dim - 1; dim++)
3534 {
3535 n = loop->order[dim];
3536 gfc_trans_scalarized_loop_end (loop, n, pblock);
3537 loop->loopvar[n] = NULL_TREE;
3538 pblock = &loop->code[n];
3539 }
3540
3541 /* We don't want to finish the outermost loop entirely. */
3542 n = loop->order[loop->temp_dim - 1];
3543 gfc_trans_scalarized_loop_end (loop, n, pblock);
3544
3545 /* Restore the initial offsets. */
3546 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3547 {
3548 gfc_ss_type ss_type;
3549 gfc_ss_info *ss_info;
3550
3551 ss_info = ss->info;
3552
3553 if ((ss_info->useflags & 2) == 0)
3554 continue;
3555
3556 ss_type = ss_info->type;
3557 if (ss_type != GFC_SS_SECTION
3558 && ss_type != GFC_SS_FUNCTION
3559 && ss_type != GFC_SS_CONSTRUCTOR
3560 && ss_type != GFC_SS_COMPONENT)
3561 continue;
3562
3563 ss_info->data.array.offset = ss_info->data.array.saved_offset;
3564 }
3565
3566 /* Restart all the inner loops we just finished. */
3567 for (dim = loop->temp_dim - 2; dim >= 0; dim--)
3568 {
3569 n = loop->order[dim];
3570
3571 gfc_start_block (&loop->code[n]);
3572
3573 loop->loopvar[n] = gfc_create_var (gfc_array_index_type, "Q");
3574
3575 gfc_trans_preloop_setup (loop, dim, 2, &loop->code[n]);
3576 }
3577
3578 /* Start a block for the secondary copying code. */
3579 gfc_start_block (body);
3580 }
3581
3582
3583 /* Precalculate (either lower or upper) bound of an array section.
3584 BLOCK: Block in which the (pre)calculation code will go.
3585 BOUNDS[DIM]: Where the bound value will be stored once evaluated.
3586 VALUES[DIM]: Specified bound (NULL <=> unspecified).
3587 DESC: Array descriptor from which the bound will be picked if unspecified
3588 (either lower or upper bound according to LBOUND). */
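/* For instance, for the section  a(:, 2:)  the lower bound of the first
   dimension is unspecified and therefore read from the descriptor, while
   the second dimension uses the expression 2; either way the result is
   saved with gfc_evaluate_now so that it is computed only once.  */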
3589
3590 static void
3591 evaluate_bound (stmtblock_t *block, tree *bounds, gfc_expr ** values,
3592 tree desc, int dim, bool lbound)
3593 {
3594 gfc_se se;
3595 gfc_expr * input_val = values[dim];
3596 tree *output = &bounds[dim];
3597
3598
3599 if (input_val)
3600 {
3601 /* Specified section bound. */
3602 gfc_init_se (&se, NULL);
3603 gfc_conv_expr_type (&se, input_val, gfc_array_index_type);
3604 gfc_add_block_to_block (block, &se.pre);
3605 *output = se.expr;
3606 }
3607 else
3608 {
3609 /* No specific bound specified so use the bound of the array. */
3610 *output = lbound ? gfc_conv_array_lbound (desc, dim) :
3611 gfc_conv_array_ubound (desc, dim);
3612 }
3613 *output = gfc_evaluate_now (*output, block);
3614 }
3615
3616
3617 /* Calculate the start, end and stride of an array section dimension. */
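/* For example, a dimension written as  2:10:3  yields start 2, end 10 and
   stride 3, each evaluated into the gfc_array_info fields; a vector
   subscript instead gets a zero-based start and unit stride, since the
   vector itself is indexed from zero during scalarization.  */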
3618
3619 static void
3620 gfc_conv_section_startstride (gfc_loopinfo * loop, gfc_ss * ss, int dim)
3621 {
3622 gfc_expr *stride = NULL;
3623 tree desc;
3624 gfc_se se;
3625 gfc_array_info *info;
3626 gfc_array_ref *ar;
3627
3628 gcc_assert (ss->info->type == GFC_SS_SECTION);
3629
3630 info = &ss->info->data.array;
3631 ar = &info->ref->u.ar;
3632
3633 if (ar->dimen_type[dim] == DIMEN_VECTOR)
3634 {
3635 /* We use a zero-based index to access the vector. */
3636 info->start[dim] = gfc_index_zero_node;
3637 info->end[dim] = NULL;
3638 info->stride[dim] = gfc_index_one_node;
3639 return;
3640 }
3641
3642 gcc_assert (ar->dimen_type[dim] == DIMEN_RANGE
3643 || ar->dimen_type[dim] == DIMEN_THIS_IMAGE);
3644 desc = info->descriptor;
3645 stride = ar->stride[dim];
3646
3647 /* Calculate the start of the range. For vector subscripts this will
3648 be the range of the vector. */
3649 evaluate_bound (&loop->pre, info->start, ar->start, desc, dim, true);
3650
3651 /* Similarly calculate the end. Although this is not used in the
3652 scalarizer, it is needed when checking bounds and where the end
3653 is an expression with side-effects. */
3654 evaluate_bound (&loop->pre, info->end, ar->end, desc, dim, false);
3655
3656 /* Calculate the stride. */
3657 if (stride == NULL)
3658 info->stride[dim] = gfc_index_one_node;
3659 else
3660 {
3661 gfc_init_se (&se, NULL);
3662 gfc_conv_expr_type (&se, stride, gfc_array_index_type);
3663 gfc_add_block_to_block (&loop->pre, &se.pre);
3664 info->stride[dim] = gfc_evaluate_now (se.expr, &loop->pre);
3665 }
3666 }
3667
3668
3669 /* Calculates the range start and stride for a SS chain. Also gets the
3670 descriptor and data pointer. The range of vector subscripts is the size
3671 of the vector. Array bounds are also checked. */
3672
3673 void
3674 gfc_conv_ss_startstride (gfc_loopinfo * loop)
3675 {
3676 int n;
3677 tree tmp;
3678 gfc_ss *ss;
3679 tree desc;
3680
3681 loop->dimen = 0;
3682 /* Determine the rank of the loop. */
3683 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3684 {
3685 switch (ss->info->type)
3686 {
3687 case GFC_SS_SECTION:
3688 case GFC_SS_CONSTRUCTOR:
3689 case GFC_SS_FUNCTION:
3690 case GFC_SS_COMPONENT:
3691 loop->dimen = ss->dimen;
3692 goto done;
3693
3694 /* As usual, lbound and ubound are exceptions! */
3695 case GFC_SS_INTRINSIC:
3696 switch (ss->info->expr->value.function.isym->id)
3697 {
3698 case GFC_ISYM_LBOUND:
3699 case GFC_ISYM_UBOUND:
3700 case GFC_ISYM_LCOBOUND:
3701 case GFC_ISYM_UCOBOUND:
3702 case GFC_ISYM_THIS_IMAGE:
3703 loop->dimen = ss->dimen;
3704 goto done;
3705
3706 default:
3707 break;
3708 }
3709
3710 default:
3711 break;
3712 }
3713 }
3714
3715 /* We should have determined the rank of the expression by now. If
3716 not, that's bad news. */
3717 gcc_unreachable ();
3718
3719 done:
3720 /* Loop over all the SS in the chain. */
3721 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3722 {
3723 gfc_ss_info *ss_info;
3724 gfc_array_info *info;
3725 gfc_expr *expr;
3726
3727 ss_info = ss->info;
3728 expr = ss_info->expr;
3729 info = &ss_info->data.array;
3730
3731 if (expr && expr->shape && !info->shape)
3732 info->shape = expr->shape;
3733
3734 switch (ss_info->type)
3735 {
3736 case GFC_SS_SECTION:
3737 /* Get the descriptor for the array. If it is a cross-loop array,
3738 the descriptor was already obtained in the outermost loop. */
3739 if (ss->parent == NULL)
3740 gfc_conv_ss_descriptor (&loop->pre, ss, !loop->array_parameter);
3741
3742 for (n = 0; n < ss->dimen; n++)
3743 gfc_conv_section_startstride (loop, ss, ss->dim[n]);
3744 break;
3745
3746 case GFC_SS_INTRINSIC:
3747 switch (expr->value.function.isym->id)
3748 {
3749 /* Fall through to supply start and stride. */
3750 case GFC_ISYM_LBOUND:
3751 case GFC_ISYM_UBOUND:
3752 case GFC_ISYM_LCOBOUND:
3753 case GFC_ISYM_UCOBOUND:
3754 case GFC_ISYM_THIS_IMAGE:
3755 break;
3756
3757 default:
3758 continue;
3759 }
3760
3761 case GFC_SS_CONSTRUCTOR:
3762 case GFC_SS_FUNCTION:
3763 for (n = 0; n < ss->dimen; n++)
3764 {
3765 int dim = ss->dim[n];
3766
3767 info->start[dim] = gfc_index_zero_node;
3768 info->end[dim] = gfc_index_zero_node;
3769 info->stride[dim] = gfc_index_one_node;
3770 }
3771 break;
3772
3773 default:
3774 break;
3775 }
3776 }
3777
3778 /* The rest is just runtime bound checking. */
3779 if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
3780 {
3781 stmtblock_t block;
3782 tree lbound, ubound;
3783 tree end;
3784 tree size[GFC_MAX_DIMENSIONS];
3785 tree stride_pos, stride_neg, non_zerosized, tmp2, tmp3;
3786 gfc_array_info *info;
3787 char *msg;
3788 int dim;
3789
3790 gfc_start_block (&block);
3791
3792 for (n = 0; n < loop->dimen; n++)
3793 size[n] = NULL_TREE;
3794
3795 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3796 {
3797 stmtblock_t inner;
3798 gfc_ss_info *ss_info;
3799 gfc_expr *expr;
3800 locus *expr_loc;
3801 const char *expr_name;
3802
3803 ss_info = ss->info;
3804 if (ss_info->type != GFC_SS_SECTION)
3805 continue;
3806
3807 /* Catch allocatable lhs in f2003. */
3808 if (gfc_option.flag_realloc_lhs && ss->is_alloc_lhs)
3809 continue;
3810
3811 expr = ss_info->expr;
3812 expr_loc = &expr->where;
3813 expr_name = expr->symtree->name;
3814
3815 gfc_start_block (&inner);
3816
3817 /* TODO: range checking for mapped dimensions. */
3818 info = &ss_info->data.array;
3819
3820 /* This code only checks ranges. Elemental and vector
3821 dimensions are checked later. */
3822 for (n = 0; n < loop->dimen; n++)
3823 {
3824 bool check_upper;
3825
3826 dim = ss->dim[n];
3827 if (info->ref->u.ar.dimen_type[dim] != DIMEN_RANGE)
3828 continue;
3829
3830 if (dim == info->ref->u.ar.dimen - 1
3831 && info->ref->u.ar.as->type == AS_ASSUMED_SIZE)
3832 check_upper = false;
3833 else
3834 check_upper = true;
3835
3836 /* Zero stride is not allowed. */
3837 tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
3838 info->stride[dim], gfc_index_zero_node);
3839 asprintf (&msg, "Zero stride is not allowed, for dimension %d "
3840 "of array '%s'", dim + 1, expr_name);
3841 gfc_trans_runtime_check (true, false, tmp, &inner,
3842 expr_loc, msg);
3843 free (msg);
3844
3845 desc = info->descriptor;
3846
3847 /* This is the run-time equivalent of resolve.c's
3848 check_dimension(). The logic is more readable there
3849 than it is here, with all the trees. */
3850 lbound = gfc_conv_array_lbound (desc, dim);
3851 end = info->end[dim];
3852 if (check_upper)
3853 ubound = gfc_conv_array_ubound (desc, dim);
3854 else
3855 ubound = NULL;
3856
3857 /* non_zerosized is true when the selected range is not
3858 empty. */
3859 stride_pos = fold_build2_loc (input_location, GT_EXPR,
3860 boolean_type_node, info->stride[dim],
3861 gfc_index_zero_node);
3862 tmp = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
3863 info->start[dim], end);
3864 stride_pos = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3865 boolean_type_node, stride_pos, tmp);
3866
3867 stride_neg = fold_build2_loc (input_location, LT_EXPR,
3868 boolean_type_node,
3869 info->stride[dim], gfc_index_zero_node);
3870 tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
3871 info->start[dim], end);
3872 stride_neg = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3873 boolean_type_node,
3874 stride_neg, tmp);
3875 non_zerosized = fold_build2_loc (input_location, TRUTH_OR_EXPR,
3876 boolean_type_node,
3877 stride_pos, stride_neg);
3878
3879 /* Check the start of the range against the lower and upper
3880 bounds of the array, if the range is not empty.
3881 If upper bound is present, include both bounds in the
3882 error message. */
3883 if (check_upper)
3884 {
3885 tmp = fold_build2_loc (input_location, LT_EXPR,
3886 boolean_type_node,
3887 info->start[dim], lbound);
3888 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3889 boolean_type_node,
3890 non_zerosized, tmp);
3891 tmp2 = fold_build2_loc (input_location, GT_EXPR,
3892 boolean_type_node,
3893 info->start[dim], ubound);
3894 tmp2 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3895 boolean_type_node,
3896 non_zerosized, tmp2);
3897 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3898 "outside of expected range (%%ld:%%ld)",
3899 dim + 1, expr_name);
3900 gfc_trans_runtime_check (true, false, tmp, &inner,
3901 expr_loc, msg,
3902 fold_convert (long_integer_type_node, info->start[dim]),
3903 fold_convert (long_integer_type_node, lbound),
3904 fold_convert (long_integer_type_node, ubound));
3905 gfc_trans_runtime_check (true, false, tmp2, &inner,
3906 expr_loc, msg,
3907 fold_convert (long_integer_type_node, info->start[dim]),
3908 fold_convert (long_integer_type_node, lbound),
3909 fold_convert (long_integer_type_node, ubound));
3910 free (msg);
3911 }
3912 else
3913 {
3914 tmp = fold_build2_loc (input_location, LT_EXPR,
3915 boolean_type_node,
3916 info->start[dim], lbound);
3917 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3918 boolean_type_node, non_zerosized, tmp);
3919 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3920 "below lower bound of %%ld",
3921 dim + 1, expr_name);
3922 gfc_trans_runtime_check (true, false, tmp, &inner,
3923 expr_loc, msg,
3924 fold_convert (long_integer_type_node, info->start[dim]),
3925 fold_convert (long_integer_type_node, lbound));
3926 free (msg);
3927 }
3928
3929 /* Compute the last element of the range, which is not
3930 necessarily "end" (think 0:5:3, which doesn't contain 5)
3931 and check it against both lower and upper bounds. */
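/* For example, for the section 0:5:3 the code below computes
   end - ((end - start) mod stride) = 5 - (5 mod 3) = 3, which is the
   last element actually referenced.  */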
3932
3933 tmp = fold_build2_loc (input_location, MINUS_EXPR,
3934 gfc_array_index_type, end,
3935 info->start[dim]);
3936 tmp = fold_build2_loc (input_location, TRUNC_MOD_EXPR,
3937 gfc_array_index_type, tmp,
3938 info->stride[dim]);
3939 tmp = fold_build2_loc (input_location, MINUS_EXPR,
3940 gfc_array_index_type, end, tmp);
3941 tmp2 = fold_build2_loc (input_location, LT_EXPR,
3942 boolean_type_node, tmp, lbound);
3943 tmp2 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3944 boolean_type_node, non_zerosized, tmp2);
3945 if (check_upper)
3946 {
3947 tmp3 = fold_build2_loc (input_location, GT_EXPR,
3948 boolean_type_node, tmp, ubound);
3949 tmp3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3950 boolean_type_node, non_zerosized, tmp3);
3951 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3952 "outside of expected range (%%ld:%%ld)",
3953 dim + 1, expr_name);
3954 gfc_trans_runtime_check (true, false, tmp2, &inner,
3955 expr_loc, msg,
3956 fold_convert (long_integer_type_node, tmp),
3957 fold_convert (long_integer_type_node, ubound),
3958 fold_convert (long_integer_type_node, lbound));
3959 gfc_trans_runtime_check (true, false, tmp3, &inner,
3960 expr_loc, msg,
3961 fold_convert (long_integer_type_node, tmp),
3962 fold_convert (long_integer_type_node, ubound),
3963 fold_convert (long_integer_type_node, lbound));
3964 free (msg);
3965 }
3966 else
3967 {
3968 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3969 "below lower bound of %%ld",
3970 dim + 1, expr_name);
3971 gfc_trans_runtime_check (true, false, tmp2, &inner,
3972 expr_loc, msg,
3973 fold_convert (long_integer_type_node, tmp),
3974 fold_convert (long_integer_type_node, lbound));
3975 free (msg);
3976 }
3977
3978 /* Check the section sizes match. */
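/* The extent computed below is max ((end - start) / stride + 1, 0);
   e.g. the section 1:10:2 has (10 - 1) / 2 + 1 = 5 elements.  */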
3979 tmp = fold_build2_loc (input_location, MINUS_EXPR,
3980 gfc_array_index_type, end,
3981 info->start[dim]);
3982 tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR,
3983 gfc_array_index_type, tmp,
3984 info->stride[dim]);
3985 tmp = fold_build2_loc (input_location, PLUS_EXPR,
3986 gfc_array_index_type,
3987 gfc_index_one_node, tmp);
3988 tmp = fold_build2_loc (input_location, MAX_EXPR,
3989 gfc_array_index_type, tmp,
3990 build_int_cst (gfc_array_index_type, 0));
3991 /* We remember the size of the first section, and check all the
3992 others against this. */
3993 if (size[n])
3994 {
3995 tmp3 = fold_build2_loc (input_location, NE_EXPR,
3996 boolean_type_node, tmp, size[n]);
3997 asprintf (&msg, "Array bound mismatch for dimension %d "
3998 "of array '%s' (%%ld/%%ld)",
3999 dim + 1, expr_name);
4000
4001 gfc_trans_runtime_check (true, false, tmp3, &inner,
4002 expr_loc, msg,
4003 fold_convert (long_integer_type_node, tmp),
4004 fold_convert (long_integer_type_node, size[n]));
4005
4006 free (msg);
4007 }
4008 else
4009 size[n] = gfc_evaluate_now (tmp, &inner);
4010 }
4011
4012 tmp = gfc_finish_block (&inner);
4013
4014 /* For optional arguments, only check bounds if the argument is
4015 present. */
4016 if (expr->symtree->n.sym->attr.optional
4017 || expr->symtree->n.sym->attr.not_always_present)
4018 tmp = build3_v (COND_EXPR,
4019 gfc_conv_expr_present (expr->symtree->n.sym),
4020 tmp, build_empty_stmt (input_location));
4021
4022 gfc_add_expr_to_block (&block, tmp);
4023
4024 }
4025
4026 tmp = gfc_finish_block (&block);
4027 gfc_add_expr_to_block (&loop->pre, tmp);
4028 }
4029
4030 for (loop = loop->nested; loop; loop = loop->next)
4031 gfc_conv_ss_startstride (loop);
4032 }
4033
4034 /* Return true if both symbols could refer to the same data object. Does
4035 not take account of aliasing due to equivalence statements. */
4036
4037 static int
4038 symbols_could_alias (gfc_symbol *lsym, gfc_symbol *rsym, bool lsym_pointer,
4039 bool lsym_target, bool rsym_pointer, bool rsym_target)
4040 {
4041 /* Aliasing isn't possible if the symbols have different base types. */
4042 if (gfc_compare_types (&lsym->ts, &rsym->ts) == 0)
4043 return 0;
4044
4045 /* Pointers can point to other pointers and target objects. */
4046
4047 if ((lsym_pointer && (rsym_pointer || rsym_target))
4048 || (rsym_pointer && (lsym_pointer || lsym_target)))
4049 return 1;
4050
4051 /* Special case: Argument association, cf. F90 12.4.1.6, F2003 12.4.1.7
4052 and F2008 12.5.2.13 items 3b and 4b. The pointer case (a) is already
4053 checked above. */
4054 if (lsym_target && rsym_target
4055 && ((lsym->attr.dummy && !lsym->attr.contiguous
4056 && (!lsym->attr.dimension || lsym->as->type == AS_ASSUMED_SHAPE))
4057 || (rsym->attr.dummy && !rsym->attr.contiguous
4058 && (!rsym->attr.dimension
4059 || rsym->as->type == AS_ASSUMED_SHAPE))))
4060 return 1;
4061
4062 return 0;
4063 }
4064
4065
4066 /* Return true if the two SS could be aliased, i.e. both point to the same data
4067 object. */
4068 /* TODO: resolve aliases based on frontend expressions. */
4069
4070 static int
4071 gfc_could_be_alias (gfc_ss * lss, gfc_ss * rss)
4072 {
4073 gfc_ref *lref;
4074 gfc_ref *rref;
4075 gfc_expr *lexpr, *rexpr;
4076 gfc_symbol *lsym;
4077 gfc_symbol *rsym;
4078 bool lsym_pointer, lsym_target, rsym_pointer, rsym_target;
4079
4080 lexpr = lss->info->expr;
4081 rexpr = rss->info->expr;
4082
4083 lsym = lexpr->symtree->n.sym;
4084 rsym = rexpr->symtree->n.sym;
4085
4086 lsym_pointer = lsym->attr.pointer;
4087 lsym_target = lsym->attr.target;
4088 rsym_pointer = rsym->attr.pointer;
4089 rsym_target = rsym->attr.target;
4090
4091 if (symbols_could_alias (lsym, rsym, lsym_pointer, lsym_target,
4092 rsym_pointer, rsym_target))
4093 return 1;
4094
4095 if (rsym->ts.type != BT_DERIVED && rsym->ts.type != BT_CLASS
4096 && lsym->ts.type != BT_DERIVED && lsym->ts.type != BT_CLASS)
4097 return 0;
4098
4099 /* For derived types we must check all the component types. We can ignore
4100 array references as these will have the same base type as the previous
4101 component ref. */
4102 for (lref = lexpr->ref; lref != lss->info->data.array.ref; lref = lref->next)
4103 {
4104 if (lref->type != REF_COMPONENT)
4105 continue;
4106
4107 lsym_pointer = lsym_pointer || lref->u.c.sym->attr.pointer;
4108 lsym_target = lsym_target || lref->u.c.sym->attr.target;
4109
4110 if (symbols_could_alias (lref->u.c.sym, rsym, lsym_pointer, lsym_target,
4111 rsym_pointer, rsym_target))
4112 return 1;
4113
4114 if ((lsym_pointer && (rsym_pointer || rsym_target))
4115 || (rsym_pointer && (lsym_pointer || lsym_target)))
4116 {
4117 if (gfc_compare_types (&lref->u.c.component->ts,
4118 &rsym->ts))
4119 return 1;
4120 }
4121
4122 for (rref = rexpr->ref; rref != rss->info->data.array.ref;
4123 rref = rref->next)
4124 {
4125 if (rref->type != REF_COMPONENT)
4126 continue;
4127
4128 rsym_pointer = rsym_pointer || rref->u.c.sym->attr.pointer;
4129 rsym_target = rsym_target || rref->u.c.sym->attr.target;
4130
4131 if (symbols_could_alias (lref->u.c.sym, rref->u.c.sym,
4132 lsym_pointer, lsym_target,
4133 rsym_pointer, rsym_target))
4134 return 1;
4135
4136 if ((lsym_pointer && (rsym_pointer || rsym_target))
4137 || (rsym_pointer && (lsym_pointer || lsym_target)))
4138 {
4139 if (gfc_compare_types (&lref->u.c.component->ts,
4140 &rref->u.c.sym->ts))
4141 return 1;
4142 if (gfc_compare_types (&lref->u.c.sym->ts,
4143 &rref->u.c.component->ts))
4144 return 1;
4145 if (gfc_compare_types (&lref->u.c.component->ts,
4146 &rref->u.c.component->ts))
4147 return 1;
4148 }
4149 }
4150 }
4151
4152 lsym_pointer = lsym->attr.pointer;
4153 lsym_target = lsym->attr.target;
4156
4157 for (rref = rexpr->ref; rref != rss->info->data.array.ref; rref = rref->next)
4158 {
4159 if (rref->type != REF_COMPONENT)
4160 break;
4161
4162 rsym_pointer = rsym_pointer || rref->u.c.sym->attr.pointer;
4163 rsym_target = rsym_target || rref->u.c.sym->attr.target;
4164
4165 if (symbols_could_alias (rref->u.c.sym, lsym,
4166 lsym_pointer, lsym_target,
4167 rsym_pointer, rsym_target))
4168 return 1;
4169
4170 if ((lsym_pointer && (rsym_pointer || rsym_target))
4171 || (rsym_pointer && (lsym_pointer || lsym_target)))
4172 {
4173 if (gfc_compare_types (&lsym->ts, &rref->u.c.component->ts))
4174 return 1;
4175 }
4176 }
4177
4178 return 0;
4179 }
4180
4181
4182 /* Resolve array data dependencies. Creates a temporary if required. */
4183 /* TODO: Calc dependencies with gfc_expr rather than gfc_ss, and move to
4184 dependency.c. */
4185
4186 void
4187 gfc_conv_resolve_dependencies (gfc_loopinfo * loop, gfc_ss * dest,
4188 gfc_ss * rss)
4189 {
4190 gfc_ss *ss;
4191 gfc_ref *lref;
4192 gfc_ref *rref;
4193 gfc_expr *dest_expr;
4194 gfc_expr *ss_expr;
4195 int nDepend = 0;
4196 int i, j;
4197
4198 loop->temp_ss = NULL;
4199 dest_expr = dest->info->expr;
4200
4201 for (ss = rss; ss != gfc_ss_terminator; ss = ss->next)
4202 {
4203 if (ss->info->type != GFC_SS_SECTION)
4204 continue;
4205
4206 ss_expr = ss->info->expr;
4207
4208 if (dest_expr->symtree->n.sym != ss_expr->symtree->n.sym)
4209 {
4210 if (gfc_could_be_alias (dest, ss)
4211 || gfc_are_equivalenced_arrays (dest_expr, ss_expr))
4212 {
4213 nDepend = 1;
4214 break;
4215 }
4216 }
4217 else
4218 {
4219 lref = dest_expr->ref;
4220 rref = ss_expr->ref;
4221
4222 nDepend = gfc_dep_resolver (lref, rref, &loop->reverse[0]);
4223
4224 if (nDepend == 1)
4225 break;
4226
4227 for (i = 0; i < dest->dimen; i++)
4228 for (j = 0; j < ss->dimen; j++)
4229 if (i != j
4230 && dest->dim[i] == ss->dim[j])
4231 {
4232 /* If we don't access array elements in the same order,
4233 there is a dependency. */
4234 nDepend = 1;
4235 goto temporary;
4236 }
4237 #if 0
4238 /* TODO : loop shifting. */
4239 if (nDepend == 1)
4240 {
4241 /* Mark the dimensions for LOOP SHIFTING */
4242 for (n = 0; n < loop->dimen; n++)
4243 {
4244 int dim = dest->data.info.dim[n];
4245
4246 if (lref->u.ar.dimen_type[dim] == DIMEN_VECTOR)
4247 depends[n] = 2;
4248 else if (! gfc_is_same_range (&lref->u.ar,
4249 &rref->u.ar, dim, 0))
4250 depends[n] = 1;
4251 }
4252
4253 /* Put all the dimensions with dependencies in the
4254 innermost loops. */
4255 dim = 0;
4256 for (n = 0; n < loop->dimen; n++)
4257 {
4258 gcc_assert (loop->order[n] == n);
4259 if (depends[n])
4260 loop->order[dim++] = n;
4261 }
4262 for (n = 0; n < loop->dimen; n++)
4263 {
4264 if (! depends[n])
4265 loop->order[dim++] = n;
4266 }
4267
4268 gcc_assert (dim == loop->dimen);
4269 break;
4270 }
4271 #endif
4272 }
4273 }
4274
4275 temporary:
4276
4277 if (nDepend == 1)
4278 {
4279 tree base_type = gfc_typenode_for_spec (&dest_expr->ts);
4280 if (GFC_ARRAY_TYPE_P (base_type)
4281 || GFC_DESCRIPTOR_TYPE_P (base_type))
4282 base_type = gfc_get_element_type (base_type);
4283 loop->temp_ss = gfc_get_temp_ss (base_type, dest->info->string_length,
4284 loop->dimen);
4285 gfc_add_ss_to_loop (loop, loop->temp_ss);
4286 }
4287 else
4288 loop->temp_ss = NULL;
4289 }
4290
4291
4292 /* Browse through each array's information from the scalarizer and set the loop
4293 bounds according to the "best" one (per dimension), i.e. the one which
4294 provides the most information (constant bounds, shape, etc). */
4295
4296 static void
4297 set_loop_bounds (gfc_loopinfo *loop)
4298 {
4299 int n, dim, spec_dim;
4300 gfc_array_info *info;
4301 gfc_array_info *specinfo;
4302 gfc_ss *ss;
4303 tree tmp;
4304 gfc_ss **loopspec;
4305 bool dynamic[GFC_MAX_DIMENSIONS];
4306 mpz_t *cshape;
4307 mpz_t i;
4308
4309 loopspec = loop->specloop;
4310
4311 mpz_init (i);
4312 for (n = 0; n < loop->dimen; n++)
4313 {
4314 loopspec[n] = NULL;
4315 dynamic[n] = false;
4316 /* We use one SS term, and use that to determine the bounds of the
4317 loop for this dimension. We try to pick the simplest term. */
4318 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
4319 {
4320 gfc_ss_type ss_type;
4321
4322 ss_type = ss->info->type;
4323 if (ss_type == GFC_SS_SCALAR
4324 || ss_type == GFC_SS_TEMP
4325 || ss_type == GFC_SS_REFERENCE)
4326 continue;
4327
4328 info = &ss->info->data.array;
4329 dim = ss->dim[n];
4330
4331 if (loopspec[n] != NULL)
4332 {
4333 specinfo = &loopspec[n]->info->data.array;
4334 spec_dim = loopspec[n]->dim[n];
4335 }
4336 else
4337 {
4338 /* Silence uninitialized warnings. */
4339 specinfo = NULL;
4340 spec_dim = 0;
4341 }
4342
4343 if (info->shape)
4344 {
4345 gcc_assert (info->shape[dim]);
4346 /* The frontend has worked out the size for us. */
4347 if (!loopspec[n]
4348 || !specinfo->shape
4349 || !integer_zerop (specinfo->start[spec_dim]))
4350 /* Prefer zero-based descriptors if possible. */
4351 loopspec[n] = ss;
4352 continue;
4353 }
4354
4355 if (ss_type == GFC_SS_CONSTRUCTOR)
4356 {
4357 gfc_constructor_base base;
4358 /* An unknown size constructor will always be rank one.
4359 Higher rank constructors will either have known shape,
4360 or still be wrapped in a call to reshape. */
4361 gcc_assert (loop->dimen == 1);
4362
4363 /* Always prefer to use the constructor bounds if the size
4364 can be determined at compile time. Prefer not to otherwise,
4365 since the general case involves realloc, and it's better to
4366 avoid that overhead if possible. */
4367 base = ss->info->expr->value.constructor;
4368 dynamic[n] = gfc_get_array_constructor_size (&i, base);
4369 if (!dynamic[n] || !loopspec[n])
4370 loopspec[n] = ss;
4371 continue;
4372 }
4373
4374 /* TODO: Pick the best bound if we have a choice between a
4375 function and something else. */
4376 if (ss_type == GFC_SS_FUNCTION)
4377 {
4378 loopspec[n] = ss;
4379 continue;
4380 }
4381
4382 /* Avoid using an allocatable lhs in an assignment, since
4383 there might be a reallocation coming. */
4384 if (loopspec[n] && ss->is_alloc_lhs)
4385 continue;
4386
4387 if (ss_type != GFC_SS_SECTION)
4388 continue;
4389
4390 if (!loopspec[n])
4391 loopspec[n] = ss;
4392 /* Criteria for choosing a loop specifier (most important first):
4393 doesn't need realloc
4394 stride of one
4395 known stride
4396 known lower bound
4397 known upper bound
4398 */
4399 else if ((loopspec[n]->info->type == GFC_SS_CONSTRUCTOR && dynamic[n])
4400 || n >= loop->dimen)
4401 loopspec[n] = ss;
4402 else if (integer_onep (info->stride[dim])
4403 && !integer_onep (specinfo->stride[spec_dim]))
4404 loopspec[n] = ss;
4405 else if (INTEGER_CST_P (info->stride[dim])
4406 && !INTEGER_CST_P (specinfo->stride[spec_dim]))
4407 loopspec[n] = ss;
4408 else if (INTEGER_CST_P (info->start[dim])
4409 && !INTEGER_CST_P (specinfo->start[spec_dim]))
4410 loopspec[n] = ss;
4411 /* We don't work out the upper bound.
4412 else if (INTEGER_CST_P (info->finish[n])
4413 && ! INTEGER_CST_P (specinfo->finish[n]))
4414 loopspec[n] = ss; */
4415 }
4416
4417 /* We should have found the scalarization loop specifier. If not,
4418 that's bad news. */
4419 gcc_assert (loopspec[n]);
4420
4421 info = &loopspec[n]->info->data.array;
4422 dim = loopspec[n]->dim[n];
4423
4424 /* Set the extents of this range. */
4425 cshape = info->shape;
4426 if (cshape && INTEGER_CST_P (info->start[dim])
4427 && INTEGER_CST_P (info->stride[dim]))
4428 {
4429 loop->from[n] = info->start[dim];
4430 mpz_set (i, cshape[get_array_ref_dim_for_loop_dim (loopspec[n], n)]);
4431 mpz_sub_ui (i, i, 1);
4432 /* To = from + (size - 1) * stride. */
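/* E.g. with from = 2, a frontend shape of 3 and stride 3 this gives
   to = 2 + (3 - 1) * 3 = 8.  */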
4433 tmp = gfc_conv_mpz_to_tree (i, gfc_index_integer_kind);
4434 if (!integer_onep (info->stride[dim]))
4435 tmp = fold_build2_loc (input_location, MULT_EXPR,
4436 gfc_array_index_type, tmp,
4437 info->stride[dim]);
4438 loop->to[n] = fold_build2_loc (input_location, PLUS_EXPR,
4439 gfc_array_index_type,
4440 loop->from[n], tmp);
4441 }
4442 else
4443 {
4444 loop->from[n] = info->start[dim];
4445 switch (loopspec[n]->info->type)
4446 {
4447 case GFC_SS_CONSTRUCTOR:
4448 /* The upper bound is calculated when we expand the
4449 constructor. */
4450 gcc_assert (loop->to[n] == NULL_TREE);
4451 break;
4452
4453 case GFC_SS_SECTION:
4454 /* Use the end expression if it exists and is not constant,
4455 so that it is only evaluated once. */
4456 loop->to[n] = info->end[dim];
4457 break;
4458
4459 case GFC_SS_FUNCTION:
4460 /* The loop bound will be set when we generate the call. */
4461 gcc_assert (loop->to[n] == NULL_TREE);
4462 break;
4463
4464 default:
4465 gcc_unreachable ();
4466 }
4467 }
4468
4469 /* Transform everything so we have a simple incrementing variable. */
4470 if (integer_onep (info->stride[dim]))
4471 info->delta[dim] = gfc_index_zero_node;
4472 else
4473 {
4474 /* Set the delta for this section. */
4475 info->delta[dim] = gfc_evaluate_now (loop->from[n], &loop->pre);
4476 /* The number of iterations is (end - start + step) / step.
4477 With start = 0, this simplifies to
4478 last = end / step;
4479 for (i = 0; i <= last; i++) {...}; */
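/* E.g. from = 2, to = 10, stride = 3 becomes a loop over
   0 <= i <= (10 - 2) / 3 = 2, i.e. three iterations; empty ranges give
   an upper bound of -1 and hence no iterations.  */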
4480 tmp = fold_build2_loc (input_location, MINUS_EXPR,
4481 gfc_array_index_type, loop->to[n],
4482 loop->from[n]);
4483 tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR,
4484 gfc_array_index_type, tmp, info->stride[dim]);
4485 tmp = fold_build2_loc (input_location, MAX_EXPR, gfc_array_index_type,
4486 tmp, build_int_cst (gfc_array_index_type, -1));
4487 loop->to[n] = gfc_evaluate_now (tmp, &loop->pre);
4488 /* Make the loop variable start at 0. */
4489 loop->from[n] = gfc_index_zero_node;
4490 }
4491 }
4492 mpz_clear (i);
4493
4494 for (loop = loop->nested; loop; loop = loop->next)
4495 set_loop_bounds (loop);
4496 }
4497
4498
4499 /* Initialize the scalarization loop. Creates the loop variables. Determines
4500 the range of the loop variables. Creates a temporary if required.
4501 Also generates code for scalar expressions which have been
4502 moved outside the loop. */
4503
4504 void
4505 gfc_conv_loop_setup (gfc_loopinfo * loop, locus * where)
4506 {
4507 gfc_ss *tmp_ss;
4508 tree tmp;
4509
4510 set_loop_bounds (loop);
4511
4512 /* Add all the scalar code that can be taken out of the loops.
4513 This may include calculating the loop bounds, so do it before
4514 allocating the temporary. */
4515 gfc_add_loop_ss_code (loop, loop->ss, false, where);
4516
4517 tmp_ss = loop->temp_ss;
4518 /* If we want a temporary then create it. */
4519 if (tmp_ss != NULL)
4520 {
4521 gfc_ss_info *tmp_ss_info;
4522
4523 tmp_ss_info = tmp_ss->info;
4524 gcc_assert (tmp_ss_info->type == GFC_SS_TEMP);
4525 gcc_assert (loop->parent == NULL);
4526
4527 /* Make absolutely sure that this is a complete type. */
4528 if (tmp_ss_info->string_length)
4529 tmp_ss_info->data.temp.type
4530 = gfc_get_character_type_len_for_eltype
4531 (TREE_TYPE (tmp_ss_info->data.temp.type),
4532 tmp_ss_info->string_length);
4533
4534 tmp = tmp_ss_info->data.temp.type;
4535 memset (&tmp_ss_info->data.array, 0, sizeof (gfc_array_info));
4536 tmp_ss_info->type = GFC_SS_SECTION;
4537
4538 gcc_assert (tmp_ss->dimen != 0);
4539
4540 gfc_trans_create_temp_array (&loop->pre, &loop->post, tmp_ss, tmp,
4541 NULL_TREE, false, true, false, where);
4542 }
4543
4544 /* For array parameters we don't have loop variables, so don't calculate the
4545 translations. */
4546 if (!loop->array_parameter)
4547 gfc_set_delta (loop);
4548 }
4549
4550
4551 /* Calculates how to transform from loop variables to array indices for each
4552 array: once loop bounds are chosen, sets the difference (DELTA field) between
4553 loop bounds and array reference bounds, for each array info. */
4554
4555 void
4556 gfc_set_delta (gfc_loopinfo *loop)
4557 {
4558 gfc_ss *ss, **loopspec;
4559 gfc_array_info *info;
4560 tree tmp;
4561 int n, dim;
4562
4563 loopspec = loop->specloop;
4564
4565 /* Calculate the translation from loop variables to array indices. */
4566 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
4567 {
4568 gfc_ss_type ss_type;
4569
4570 ss_type = ss->info->type;
4571 if (ss_type != GFC_SS_SECTION
4572 && ss_type != GFC_SS_COMPONENT
4573 && ss_type != GFC_SS_CONSTRUCTOR)
4574 continue;
4575
4576 info = &ss->info->data.array;
4577
4578 for (n = 0; n < ss->dimen; n++)
4579 {
4580 /* If we are specifying the range, the delta is already set. */
4581 if (loopspec[n] != ss)
4582 {
4583 dim = ss->dim[n];
4584
4585 /* Calculate the offset relative to the loop variable.
4586 First multiply by the stride. */
4587 tmp = loop->from[n];
4588 if (!integer_onep (info->stride[dim]))
4589 tmp = fold_build2_loc (input_location, MULT_EXPR,
4590 gfc_array_index_type,
4591 tmp, info->stride[dim]);
4592
4593 /* Then subtract this from our starting value. */
4594 tmp = fold_build2_loc (input_location, MINUS_EXPR,
4595 gfc_array_index_type,
4596 info->start[dim], tmp);
4597
4598 info->delta[dim] = gfc_evaluate_now (tmp, &loop->pre);
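/* With delta = start - from * stride we have
   from * stride + delta == start, so the first loop iteration maps to
   the first element of this section.  */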
4599 }
4600 }
4601 }
4602
4603 for (loop = loop->nested; loop; loop = loop->next)
4604 gfc_set_delta (loop);
4605 }
4606
4607
4608 /* Calculate the size of a given array dimension from the bounds. This
4609 is simply (ubound - lbound + 1) if this expression is positive
4610 or 0 if it is negative (pick either one if it is zero). Optionally
4611 (if or_expr is present) OR the (extent <= 0) condition into it. */
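/* Callers such as gfc_array_init_size below accumulate the per-dimension
   (extent <= 0) conditions in or_expr to detect zero-sized arrays.  */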
4612
4613 tree
4614 gfc_conv_array_extent_dim (tree lbound, tree ubound, tree* or_expr)
4615 {
4616 tree res;
4617 tree cond;
4618
4619 /* Calculate (ubound - lbound + 1). */
4620 res = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
4621 ubound, lbound);
4622 res = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type, res,
4623 gfc_index_one_node);
4624
4625 /* Check whether the size for this dimension is negative. */
4626 cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, res,
4627 gfc_index_zero_node);
4628 res = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type, cond,
4629 gfc_index_zero_node, res);
4630
4631 /* Build OR expression. */
4632 if (or_expr)
4633 *or_expr = fold_build2_loc (input_location, TRUTH_OR_EXPR,
4634 boolean_type_node, *or_expr, cond);
4635
4636 return res;
4637 }
4638
4639
4640 /* For an array descriptor, get the total number of elements. This is just
4641 the product of the extents along from_dim to to_dim. */
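/* E.g. a descriptor with extents 3, 4 and 5, queried with from_dim 0 and
   to_dim 3, yields 3 * 4 * 5 = 60 elements.  */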
4642
4643 static tree
4644 gfc_conv_descriptor_size_1 (tree desc, int from_dim, int to_dim)
4645 {
4646 tree res;
4647 int dim;
4648
4649 res = gfc_index_one_node;
4650
4651 for (dim = from_dim; dim < to_dim; ++dim)
4652 {
4653 tree lbound;
4654 tree ubound;
4655 tree extent;
4656
4657 lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]);
4658 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]);
4659
4660 extent = gfc_conv_array_extent_dim (lbound, ubound, NULL);
4661 res = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
4662 res, extent);
4663 }
4664
4665 return res;
4666 }
4667
4668
4669 /* Full size of an array. */
4670
4671 tree
4672 gfc_conv_descriptor_size (tree desc, int rank)
4673 {
4674 return gfc_conv_descriptor_size_1 (desc, 0, rank);
4675 }
4676
4677
4678 /* Size of a coarray for all dimensions but the last. */
4679
4680 tree
4681 gfc_conv_descriptor_cosize (tree desc, int rank, int corank)
4682 {
4683 return gfc_conv_descriptor_size_1 (desc, rank, rank + corank - 1);
4684 }
4685
4686
4687 /* Fills in an array descriptor and returns the size of the array in bytes.
4688 The size will be a simple_val, i.e. a variable or a constant. Also
4689 calculates the offset of the base. The pointer argument overflow,
4690 which should be of integer type, will increase in value if overflow
4691 occurs during the size calculation. In pseudocode:
4692 {
4693 stride = 1;
4694 offset = 0;
4695 for (n = 0; n < rank; n++)
4696 {
4697 a.lbound[n] = specified_lower_bound;
4698 offset = offset + a.lbound[n] * stride;
4699 size = 1 - lbound;
4700 a.ubound[n] = specified_upper_bound;
4701 a.stride[n] = stride;
4702 size = size >= 0 ? ubound + size : 0; //size = ubound + 1 - lbound
4703 overflow += size == 0 ? 0: (MAX/size < stride ? 1: 0);
4704 stride = stride * size;
4705 }
4706 for (n = rank; n < rank+corank; n++)
4707 (Set lcobound/ucobound as above.)
4708 element_size = sizeof (array element);
4709 if (!rank)
4710 return element_size
4711 stride = (size_t) stride;
4712 overflow += element_size == 0 ? 0: (MAX/element_size < stride ? 1: 0);
4713 stride = stride * element_size;
4714 return (stride);
4715 } */
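/* A worked example: allocating a hypothetical a(2:4,5) with 4-byte elements
   gives, for dimension 1, lbound 2, ubound 4, stride 1,
   offset 0 - 2*1 = -2 and extent 3; for dimension 2, lbound 1, ubound 5,
   stride 3, offset -2 - 1*3 = -5 and extent 5; so 15 elements in total
   and a returned size of 15 * 4 = 60 bytes.  */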
4716 /*GCC ARRAYS*/
4717
4718 static tree
4719 gfc_array_init_size (tree descriptor, int rank, int corank, tree * poffset,
4720 gfc_expr ** lower, gfc_expr ** upper, stmtblock_t * pblock,
4721 stmtblock_t * descriptor_block, tree * overflow,
4722 tree expr3_elem_size, tree *nelems, gfc_expr *expr3)
4723 {
4724 tree type;
4725 tree tmp;
4726 tree size;
4727 tree offset;
4728 tree stride;
4729 tree element_size;
4730 tree or_expr;
4731 tree thencase;
4732 tree elsecase;
4733 tree cond;
4734 tree var;
4735 stmtblock_t thenblock;
4736 stmtblock_t elseblock;
4737 gfc_expr *ubound;
4738 gfc_se se;
4739 int n;
4740
4741 type = TREE_TYPE (descriptor);
4742
4743 stride = gfc_index_one_node;
4744 offset = gfc_index_zero_node;
4745
4746 /* Set the dtype. */
4747 tmp = gfc_conv_descriptor_dtype (descriptor);
4748 gfc_add_modify (descriptor_block, tmp, gfc_get_dtype (TREE_TYPE (descriptor)));
4749
4750 or_expr = boolean_false_node;
4751
4752 for (n = 0; n < rank; n++)
4753 {
4754 tree conv_lbound;
4755 tree conv_ubound;
4756
4757 /* We have 3 possibilities for determining the size of the array:
4758 lower == NULL => lbound = 1, ubound = upper[n]
4759 upper[n] == NULL => lbound = 1, ubound = lower[n]
4760 upper[n] != NULL => lbound = lower[n], ubound = upper[n] */
4761 ubound = upper[n];
4762
4763 /* Set lower bound. */
4764 gfc_init_se (&se, NULL);
4765 if (lower == NULL)
4766 se.expr = gfc_index_one_node;
4767 else
4768 {
4769 gcc_assert (lower[n]);
4770 if (ubound)
4771 {
4772 gfc_conv_expr_type (&se, lower[n], gfc_array_index_type);
4773 gfc_add_block_to_block (pblock, &se.pre);
4774 }
4775 else
4776 {
4777 se.expr = gfc_index_one_node;
4778 ubound = lower[n];
4779 }
4780 }
4781 gfc_conv_descriptor_lbound_set (descriptor_block, descriptor,
4782 gfc_rank_cst[n], se.expr);
4783 conv_lbound = se.expr;
4784
4785 /* Work out the offset for this component. */
4786 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
4787 se.expr, stride);
4788 offset = fold_build2_loc (input_location, MINUS_EXPR,
4789 gfc_array_index_type, offset, tmp);
4790
4791 /* Set upper bound. */
4792 gfc_init_se (&se, NULL);
4793 gcc_assert (ubound);
4794 gfc_conv_expr_type (&se, ubound, gfc_array_index_type);
4795 gfc_add_block_to_block (pblock, &se.pre);
4796
4797 gfc_conv_descriptor_ubound_set (descriptor_block, descriptor,
4798 gfc_rank_cst[n], se.expr);
4799 conv_ubound = se.expr;
4800
4801 /* Store the stride. */
4802 gfc_conv_descriptor_stride_set (descriptor_block, descriptor,
4803 gfc_rank_cst[n], stride);
4804
4805 /* Calculate size and check whether extent is negative. */
4806 size = gfc_conv_array_extent_dim (conv_lbound, conv_ubound, &or_expr);
4807 size = gfc_evaluate_now (size, pblock);
4808
4809 /* Check whether multiplying the stride by the number of
4810 elements in this dimension would overflow. We must also check
4811 whether the current dimension has zero size in order to avoid
4812 division by zero.
4813 */
4814 tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
4815 gfc_array_index_type,
4816 fold_convert (gfc_array_index_type,
4817 TYPE_MAX_VALUE (gfc_array_index_type)),
4818 size);
4819 cond = gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
4820 boolean_type_node, tmp, stride));
4821 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
4822 integer_one_node, integer_zero_node);
4823 cond = gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
4824 boolean_type_node, size,
4825 gfc_index_zero_node));
4826 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
4827 integer_zero_node, tmp);
4828 tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
4829 *overflow, tmp);
4830 *overflow = gfc_evaluate_now (tmp, pblock);
4831
4832 /* Multiply the stride by the number of elements in this dimension. */
4833 stride = fold_build2_loc (input_location, MULT_EXPR,
4834 gfc_array_index_type, stride, size);
4835 stride = gfc_evaluate_now (stride, pblock);
4836 }
4837
4838 for (n = rank; n < rank + corank; n++)
4839 {
4840 ubound = upper[n];
4841
4842 /* Set lower bound. */
4843 gfc_init_se (&se, NULL);
4844 if (lower == NULL || lower[n] == NULL)
4845 {
4846 gcc_assert (n == rank + corank - 1);
4847 se.expr = gfc_index_one_node;
4848 }
4849 else
4850 {
4851 if (ubound || n == rank + corank - 1)
4852 {
4853 gfc_conv_expr_type (&se, lower[n], gfc_array_index_type);
4854 gfc_add_block_to_block (pblock, &se.pre);
4855 }
4856 else
4857 {
4858 se.expr = gfc_index_one_node;
4859 ubound = lower[n];
4860 }
4861 }
4862 gfc_conv_descriptor_lbound_set (descriptor_block, descriptor,
4863 gfc_rank_cst[n], se.expr);
4864
4865 if (n < rank + corank - 1)
4866 {
4867 gfc_init_se (&se, NULL);
4868 gcc_assert (ubound);
4869 gfc_conv_expr_type (&se, ubound, gfc_array_index_type);
4870 gfc_add_block_to_block (pblock, &se.pre);
4871 gfc_conv_descriptor_ubound_set (descriptor_block, descriptor,
4872 gfc_rank_cst[n], se.expr);
4873 }
4874 }
4875
4876 /* The stride is the number of elements in the array, so multiply by the
4877 size of an element to get the total size. Obviously, if there is a
4878 SOURCE expression (expr3) we must use its element size. */
4879 if (expr3_elem_size != NULL_TREE)
4880 tmp = expr3_elem_size;
4881 else if (expr3 != NULL)
4882 {
4883 if (expr3->ts.type == BT_CLASS)
4884 {
4885 gfc_se se_sz;
4886 gfc_expr *sz = gfc_copy_expr (expr3);
4887 gfc_add_vptr_component (sz);
4888 gfc_add_size_component (sz);
4889 gfc_init_se (&se_sz, NULL);
4890 gfc_conv_expr (&se_sz, sz);
4891 gfc_free_expr (sz);
4892 tmp = se_sz.expr;
4893 }
4894 else
4895 {
4896 tmp = gfc_typenode_for_spec (&expr3->ts);
4897 tmp = TYPE_SIZE_UNIT (tmp);
4898 }
4899 }
4900 else
4901 tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
4902
4903 /* Convert to size_t. */
4904 element_size = fold_convert (size_type_node, tmp);
4905
4906 if (rank == 0)
4907 return element_size;
4908
4909 *nelems = gfc_evaluate_now (stride, pblock);
4910 stride = fold_convert (size_type_node, stride);
4911
4912 /* First check for overflow. Since an array of type character can
4913 have zero element_size, we must check for that before
4914 dividing. */
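/* The product stride * element_size overflows exactly when element_size
   is nonzero and TYPE_MAX_VALUE (size_type_node) / element_size < stride,
   which is what the division below detects.  */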
4915 tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
4916 size_type_node,
4917 TYPE_MAX_VALUE (size_type_node), element_size);
4918 cond = gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
4919 boolean_type_node, tmp, stride));
4920 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
4921 integer_one_node, integer_zero_node);
4922 cond = gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
4923 boolean_type_node, element_size,
4924 build_int_cst (size_type_node, 0)));
4925 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
4926 integer_zero_node, tmp);
4927 tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
4928 *overflow, tmp);
4929 *overflow = gfc_evaluate_now (tmp, pblock);
4930
4931 size = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
4932 stride, element_size);
4933
4934 if (poffset != NULL)
4935 {
4936 offset = gfc_evaluate_now (offset, pblock);
4937 *poffset = offset;
4938 }
4939
4940 if (integer_zerop (or_expr))
4941 return size;
4942 if (integer_onep (or_expr))
4943 return build_int_cst (size_type_node, 0);
4944
4945 var = gfc_create_var (TREE_TYPE (size), "size");
4946 gfc_start_block (&thenblock);
4947 gfc_add_modify (&thenblock, var, build_int_cst (size_type_node, 0));
4948 thencase = gfc_finish_block (&thenblock);
4949
4950 gfc_start_block (&elseblock);
4951 gfc_add_modify (&elseblock, var, size);
4952 elsecase = gfc_finish_block (&elseblock);
4953
4954 tmp = gfc_evaluate_now (or_expr, pblock);
4955 tmp = build3_v (COND_EXPR, tmp, thencase, elsecase);
4956 gfc_add_expr_to_block (pblock, tmp);
4957
4958 return var;
4959 }
4960
4961
4962 /* Initializes the descriptor and generates a call to _gfor_allocate. Does
4963 the work for an ALLOCATE statement. */
4964 /*GCC ARRAYS*/
4965
4966 bool
4967 gfc_array_allocate (gfc_se * se, gfc_expr * expr, tree status, tree errmsg,
4968 tree errlen, tree label_finish, tree expr3_elem_size,
4969 tree *nelems, gfc_expr *expr3)
4970 {
4971 tree tmp;
4972 tree pointer;
4973 tree offset = NULL_TREE;
4974 tree token = NULL_TREE;
4975 tree size;
4976 tree msg;
4977 tree error = NULL_TREE;
4978 tree overflow; /* Boolean storing whether size calculation overflows. */
4979 tree var_overflow = NULL_TREE;
4980 tree cond;
4981 tree set_descriptor;
4982 stmtblock_t set_descriptor_block;
4983 stmtblock_t elseblock;
4984 gfc_expr **lower;
4985 gfc_expr **upper;
4986 gfc_ref *ref, *prev_ref = NULL;
4987 bool allocatable, coarray, dimension;
4988
4989 ref = expr->ref;
4990
4991 /* Find the last reference in the chain. */
4992 while (ref && ref->next != NULL)
4993 {
4994 gcc_assert (ref->type != REF_ARRAY || ref->u.ar.type == AR_ELEMENT
4995 || (ref->u.ar.dimen == 0 && ref->u.ar.codimen > 0));
4996 prev_ref = ref;
4997 ref = ref->next;
4998 }
4999
5000 if (ref == NULL || ref->type != REF_ARRAY)
5001 return false;
5002
5003 if (!prev_ref)
5004 {
5005 allocatable = expr->symtree->n.sym->attr.allocatable;
5006 coarray = expr->symtree->n.sym->attr.codimension;
5007 dimension = expr->symtree->n.sym->attr.dimension;
5008 }
5009 else
5010 {
5011 allocatable = prev_ref->u.c.component->attr.allocatable;
5012 coarray = prev_ref->u.c.component->attr.codimension;
5013 dimension = prev_ref->u.c.component->attr.dimension;
5014 }
5015
5016 if (!dimension)
5017 gcc_assert (coarray);
5018
5019 /* Figure out the size of the array. */
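/* An allocation spec written like an element reference, e.g. a(n),
   reaches us with only 'start' expressions, which then act as the upper
   bounds (lower bounds default to 1); a(2:n) arrives as a section with
   explicit lower and upper bounds.  */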
5020 switch (ref->u.ar.type)
5021 {
5022 case AR_ELEMENT:
5023 if (!coarray)
5024 {
5025 lower = NULL;
5026 upper = ref->u.ar.start;
5027 break;
5028 }
5029 /* Fall through. */
5030
5031 case AR_SECTION:
5032 lower = ref->u.ar.start;
5033 upper = ref->u.ar.end;
5034 break;
5035
5036 case AR_FULL:
5037 gcc_assert (ref->u.ar.as->type == AS_EXPLICIT);
5038
5039 lower = ref->u.ar.as->lower;
5040 upper = ref->u.ar.as->upper;
5041 break;
5042
5043 default:
5044 gcc_unreachable ();
5045 break;
5046 }
5047
5048 overflow = integer_zero_node;
5049
5050 gfc_init_block (&set_descriptor_block);
5051 size = gfc_array_init_size (se->expr, ref->u.ar.as->rank,
5052 ref->u.ar.as->corank, &offset, lower, upper,
5053 &se->pre, &set_descriptor_block, &overflow,
5054 expr3_elem_size, nelems, expr3);
5055
5056 if (dimension)
5057 {
5058
5059 var_overflow = gfc_create_var (integer_type_node, "overflow");
5060 gfc_add_modify (&se->pre, var_overflow, overflow);
5061
5062 /* Generate the block of code handling overflow. */
5063 msg = gfc_build_addr_expr (pchar_type_node,
5064 gfc_build_localized_cstring_const
5065 ("Integer overflow when calculating the amount of "
5066 "memory to allocate"));
5067 error = build_call_expr_loc (input_location, gfor_fndecl_runtime_error,
5068 1, msg);
5069 }
5070
5071 if (status != NULL_TREE)
5072 {
5073 tree status_type = TREE_TYPE (status);
5074 stmtblock_t set_status_block;
5075
5076 gfc_start_block (&set_status_block);
5077 gfc_add_modify (&set_status_block, status,
5078 build_int_cst (status_type, LIBERROR_ALLOCATION));
5079 error = gfc_finish_block (&set_status_block);
5080 }
5081
5082 gfc_start_block (&elseblock);
5083
5084 /* Allocate memory to store the data. */
5085 if (POINTER_TYPE_P (TREE_TYPE (se->expr)))
5086 se->expr = build_fold_indirect_ref_loc (input_location, se->expr);
5087
5088 pointer = gfc_conv_descriptor_data_get (se->expr);
5089 STRIP_NOPS (pointer);
5090
5091 if (coarray && gfc_option.coarray == GFC_FCOARRAY_LIB)
5092 token = gfc_build_addr_expr (NULL_TREE,
5093 gfc_conv_descriptor_token (se->expr));
5094
5095 /* The allocatable variant takes the old pointer as first argument. */
5096 if (allocatable)
5097 gfc_allocate_allocatable (&elseblock, pointer, size, token,
5098 status, errmsg, errlen, label_finish, expr);
5099 else
5100 gfc_allocate_using_malloc (&elseblock, pointer, size, status);
5101
5102 if (dimension)
5103 {
5104 cond = gfc_unlikely (fold_build2_loc (input_location, NE_EXPR,
5105 boolean_type_node, var_overflow, integer_zero_node));
5106 tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond,
5107 error, gfc_finish_block (&elseblock));
5108 }
5109 else
5110 tmp = gfc_finish_block (&elseblock);
5111
5112 gfc_add_expr_to_block (&se->pre, tmp);
5113
5114 if (expr->ts.type == BT_CLASS
5115 && (expr3_elem_size != NULL_TREE || expr3))
5116 {
5117 tmp = build_int_cst (unsigned_char_type_node, 0);
5118 /* With class objects, it is best to play safe and null the
5119 memory because we cannot know if dynamic types have allocatable
5120 components or not. */
5121 tmp = build_call_expr_loc (input_location,
5122 builtin_decl_explicit (BUILT_IN_MEMSET),
5123 3, pointer, tmp, size);
5124 gfc_add_expr_to_block (&se->pre, tmp);
5125 }
5126
5127 /* Update the array descriptors. */
5128 if (dimension)
5129 gfc_conv_descriptor_offset_set (&set_descriptor_block, se->expr, offset);
5130
5131 set_descriptor = gfc_finish_block (&set_descriptor_block);
5132 if (status != NULL_TREE)
5133 {
5134 cond = fold_build2_loc (input_location, EQ_EXPR,
5135 boolean_type_node, status,
5136 build_int_cst (TREE_TYPE (status), 0));
5137 gfc_add_expr_to_block (&se->pre,
5138 fold_build3_loc (input_location, COND_EXPR, void_type_node,
5139 gfc_likely (cond), set_descriptor,
5140 build_empty_stmt (input_location)));
5141 }
5142 else
5143 gfc_add_expr_to_block (&se->pre, set_descriptor);
5144
5145 if ((expr->ts.type == BT_DERIVED)
5146 && expr->ts.u.derived->attr.alloc_comp)
5147 {
5148 tmp = gfc_nullify_alloc_comp (expr->ts.u.derived, se->expr,
5149 ref->u.ar.as->rank);
5150 gfc_add_expr_to_block (&se->pre, tmp);
5151 }
5152
5153 return true;
5154 }
5155
5156
5157 /* Deallocate an array variable. Also used when an allocated variable goes
5158 out of scope. */
5159 /*GCC ARRAYS*/
5160
5161 tree
5162 gfc_array_deallocate (tree descriptor, tree pstat, tree errmsg, tree errlen,
5163 tree label_finish, gfc_expr* expr)
5164 {
5165 tree var;
5166 tree tmp;
5167 stmtblock_t block;
5168 bool coarray = gfc_is_coarray (expr);
5169
5170 gfc_start_block (&block);
5171
5172 /* Get a pointer to the data. */
5173 var = gfc_conv_descriptor_data_get (descriptor);
5174 STRIP_NOPS (var);
5175
5176 /* Parameter is the address of the data component. */
5177 tmp = gfc_deallocate_with_status (coarray ? descriptor : var, pstat, errmsg,
5178 errlen, label_finish, false, expr, coarray);
5179 gfc_add_expr_to_block (&block, tmp);
5180
5181 /* Zero the data pointer; an error can occur only for coarrays, and in
5182 that case the allocation status must not be changed. */
5183 tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
5184 var, build_int_cst (TREE_TYPE (var), 0));
5185 if (pstat != NULL_TREE && coarray && gfc_option.coarray == GFC_FCOARRAY_LIB)
5186 {
5187 tree cond;
5188 tree stat = build_fold_indirect_ref_loc (input_location, pstat);
5189
5190 cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
5191 stat, build_int_cst (TREE_TYPE (stat), 0));
5192 tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
5193 cond, tmp, build_empty_stmt (input_location));
5194 }
5195
5196 gfc_add_expr_to_block (&block, tmp);
5197
5198 return gfc_finish_block (&block);
5199 }
5200
5201
5202 /* Create an array constructor from an initialization expression.
5203 We assume the frontend already did any expansions and conversions. */
5204
5205 tree
5206 gfc_conv_array_initializer (tree type, gfc_expr * expr)
5207 {
5208 gfc_constructor *c;
5209 tree tmp;
5210 gfc_se se;
5211 HOST_WIDE_INT hi;
5212 unsigned HOST_WIDE_INT lo;
5213 tree index, range;
5214 VEC(constructor_elt,gc) *v = NULL;
5215
5216 if (expr->expr_type == EXPR_VARIABLE
5217 && expr->symtree->n.sym->attr.flavor == FL_PARAMETER
5218 && expr->symtree->n.sym->value)
5219 expr = expr->symtree->n.sym->value;
5220
5221 switch (expr->expr_type)
5222 {
5223 case EXPR_CONSTANT:
5224 case EXPR_STRUCTURE:
5225 /* A single scalar or derived type value. Create an array with all
5226 elements equal to that value. */
5227 gfc_init_se (&se, NULL);
5228
5229 if (expr->expr_type == EXPR_CONSTANT)
5230 gfc_conv_constant (&se, expr);
5231 else
5232 gfc_conv_structure (&se, expr, 1);
5233
5234 tmp = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
5235 gcc_assert (tmp && INTEGER_CST_P (tmp));
5236 hi = TREE_INT_CST_HIGH (tmp);
5237 lo = TREE_INT_CST_LOW (tmp);
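/* Increment the double-int pair hi:lo by one; the result is the number
   of elements the constructor below must supply.  */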
5238 lo++;
5239 if (lo == 0)
5240 hi++;
5241 /* This will probably eat buckets of memory for large arrays. */
5242 while (hi != 0 || lo != 0)
5243 {
5244 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, se.expr);
5245 if (lo == 0)
5246 hi--;
5247 lo--;
5248 }
5249 break;
5250
5251 case EXPR_ARRAY:
5252 /* Create a vector of all the elements. */
5253 for (c = gfc_constructor_first (expr->value.constructor);
5254 c; c = gfc_constructor_next (c))
5255 {
5256 if (c->iterator)
5257 {
5258 /* Problems occur when we get something like
5259 integer :: a(lots) = (/(i, i=1, lots)/) */
5260 gfc_fatal_error ("The number of elements in the array constructor "
5261 "at %L requires an increase of the allowed %d "
5262 "upper limit. See -fmax-array-constructor "
5263 "option", &expr->where,
5264 gfc_option.flag_max_array_constructor);
5265 return NULL_TREE;
5266 }
5267 if (mpz_cmp_si (c->offset, 0) != 0)
5268 index = gfc_conv_mpz_to_tree (c->offset, gfc_index_integer_kind);
5269 else
5270 index = NULL_TREE;
5271
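/* A value the frontend has compressed into an (offset, repeat) pair is
   emitted once, indexed by a RANGE_EXPR covering
   offset .. offset + repeat - 1.  */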
5272 if (mpz_cmp_si (c->repeat, 1) > 0)
5273 {
5274 tree tmp1, tmp2;
5275 mpz_t maxval;
5276
5277 mpz_init (maxval);
5278 mpz_add (maxval, c->offset, c->repeat);
5279 mpz_sub_ui (maxval, maxval, 1);
5280 tmp2 = gfc_conv_mpz_to_tree (maxval, gfc_index_integer_kind);
5281 if (mpz_cmp_si (c->offset, 0) != 0)
5282 {
5283 mpz_add_ui (maxval, c->offset, 1);
5284 tmp1 = gfc_conv_mpz_to_tree (maxval, gfc_index_integer_kind);
5285 }
5286 else
5287 tmp1 = gfc_conv_mpz_to_tree (c->offset, gfc_index_integer_kind);
5288
5289 range = fold_build2 (RANGE_EXPR, gfc_array_index_type, tmp1, tmp2);
5290 mpz_clear (maxval);
5291 }
5292 else
5293 range = NULL;
5294
5295 gfc_init_se (&se, NULL);
5296 switch (c->expr->expr_type)
5297 {
5298 case EXPR_CONSTANT:
5299 gfc_conv_constant (&se, c->expr);
5300 break;
5301
5302 case EXPR_STRUCTURE:
5303 gfc_conv_structure (&se, c->expr, 1);
5304 break;
5305
5306 default:
5307 /* Catch those occasional beasts that do not simplify
5308 for one reason or another, assuming that if they are
5309 standard-defying, the frontend will catch them. */
5310 gfc_conv_expr (&se, c->expr);
5311 break;
5312 }
5313
5314 if (range == NULL_TREE)
5315 CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
5316 else
5317 {
5318 if (index != NULL_TREE)
5319 CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
5320 CONSTRUCTOR_APPEND_ELT (v, range, se.expr);
5321 }
5322 }
5323 break;
5324
5325 case EXPR_NULL:
5326 return gfc_build_null_descriptor (type);
5327
5328 default:
5329 gcc_unreachable ();
5330 }
5331
5332 /* Create a constructor from the list of elements. */
5333 tmp = build_constructor (type, v);
5334 TREE_CONSTANT (tmp) = 1;
5335 return tmp;
5336 }
5337
5338
5339 /* Generate code to evaluate non-constant coarray cobounds. */
5340
5341 void
5342 gfc_trans_array_cobounds (tree type, stmtblock_t * pblock,
5343 const gfc_symbol *sym)
5344 {
5345 int dim;
5346 tree ubound;
5347 tree lbound;
5348 gfc_se se;
5349 gfc_array_spec *as;
5350
5351 as = sym->as;
5352
5353 for (dim = as->rank; dim < as->rank + as->corank; dim++)
5354 {
5355 /* Evaluate non-constant array bound expressions. */
5356 lbound = GFC_TYPE_ARRAY_LBOUND (type, dim);
5357 if (as->lower[dim] && !INTEGER_CST_P (lbound))
5358 {
5359 gfc_init_se (&se, NULL);
5360 gfc_conv_expr_type (&se, as->lower[dim], gfc_array_index_type);
5361 gfc_add_block_to_block (pblock, &se.pre);
5362 gfc_add_modify (pblock, lbound, se.expr);
5363 }
5364 ubound = GFC_TYPE_ARRAY_UBOUND (type, dim);
5365 if (as->upper[dim] && !INTEGER_CST_P (ubound))
5366 {
5367 gfc_init_se (&se, NULL);
5368 gfc_conv_expr_type (&se, as->upper[dim], gfc_array_index_type);
5369 gfc_add_block_to_block (pblock, &se.pre);
5370 gfc_add_modify (pblock, ubound, se.expr);
5371 }
5372 }
5373 }
5374
5375
5376 /* Generate code to evaluate non-constant array bounds. Sets *poffset and
5377 returns the size (in elements) of the array. */
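/* For instance, for an automatic array declared as real :: a(n,m) with
   default lower bounds this emits code roughly equivalent to
   stride(2) = n, size = n * m and offset = -(1 + n).  */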
5378
5379 static tree
5380 gfc_trans_array_bounds (tree type, gfc_symbol * sym, tree * poffset,
5381 stmtblock_t * pblock)
5382 {
5383 gfc_array_spec *as;
5384 tree size;
5385 tree stride;
5386 tree offset;
5387 tree ubound;
5388 tree lbound;
5389 tree tmp;
5390 gfc_se se;
5391
5392 int dim;
5393
5394 as = sym->as;
5395
5396 size = gfc_index_one_node;
5397 offset = gfc_index_zero_node;
5398 for (dim = 0; dim < as->rank; dim++)
5399 {
5400 /* Evaluate non-constant array bound expressions. */
5401 lbound = GFC_TYPE_ARRAY_LBOUND (type, dim);
5402 if (as->lower[dim] && !INTEGER_CST_P (lbound))
5403 {
5404 gfc_init_se (&se, NULL);
5405 gfc_conv_expr_type (&se, as->lower[dim], gfc_array_index_type);
5406 gfc_add_block_to_block (pblock, &se.pre);
5407 gfc_add_modify (pblock, lbound, se.expr);
5408 }
5409 ubound = GFC_TYPE_ARRAY_UBOUND (type, dim);
5410 if (as->upper[dim] && !INTEGER_CST_P (ubound))
5411 {
5412 gfc_init_se (&se, NULL);
5413 gfc_conv_expr_type (&se, as->upper[dim], gfc_array_index_type);
5414 gfc_add_block_to_block (pblock, &se.pre);
5415 gfc_add_modify (pblock, ubound, se.expr);
5416 }
5417 /* The offset of this dimension. offset = offset - lbound * stride. */
5418 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
5419 lbound, size);
5420 offset = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
5421 offset, tmp);
5422
5423 /* The size of this dimension, and the stride of the next. */
5424 if (dim + 1 < as->rank)
5425 stride = GFC_TYPE_ARRAY_STRIDE (type, dim + 1);
5426 else
5427 stride = GFC_TYPE_ARRAY_SIZE (type);
5428
5429 if (ubound != NULL_TREE && !(stride && INTEGER_CST_P (stride)))
5430 {
5431 /* Calculate stride = size * (ubound + 1 - lbound). */
5432 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5433 gfc_array_index_type,
5434 gfc_index_one_node, lbound);
5435 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5436 gfc_array_index_type, ubound, tmp);
5437 tmp = fold_build2_loc (input_location, MULT_EXPR,
5438 gfc_array_index_type, size, tmp);
5439 if (stride)
5440 gfc_add_modify (pblock, stride, tmp);
5441 else
5442 stride = gfc_evaluate_now (tmp, pblock);
5443
5444 /* Make sure that negative size arrays are translated
5445 to being zero size. */
5446 tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
5447 stride, gfc_index_zero_node);
5448 tmp = fold_build3_loc (input_location, COND_EXPR,
5449 gfc_array_index_type, tmp,
5450 stride, gfc_index_zero_node);
5451 gfc_add_modify (pblock, stride, tmp);
5452 }
5453
5454 size = stride;
5455 }
5456
5457 gfc_trans_array_cobounds (type, pblock, sym);
5458 gfc_trans_vla_type_sizes (sym, pblock);
5459
5460 *poffset = offset;
5461 return size;
5462 }
5463
5464
5465 /* Generate code to initialize/allocate an array variable. */
5466
5467 void
5468 gfc_trans_auto_array_allocation (tree decl, gfc_symbol * sym,
5469 gfc_wrapped_block * block)
5470 {
5471 stmtblock_t init;
5472 tree type;
5473 tree tmp = NULL_TREE;
5474 tree size;
5475 tree offset;
5476 tree space;
5477 tree inittree;
5478 bool onstack;
5479
5480 gcc_assert (!(sym->attr.pointer || sym->attr.allocatable));
5481
5482 /* Do nothing for USEd variables. */
5483 if (sym->attr.use_assoc)
5484 return;
5485
5486 type = TREE_TYPE (decl);
5487 gcc_assert (GFC_ARRAY_TYPE_P (type));
5488 onstack = TREE_CODE (type) != POINTER_TYPE;
5489
5490 gfc_init_block (&init);
5491
5492 /* Evaluate character string length. */
5493 if (sym->ts.type == BT_CHARACTER
5494 && onstack && !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
5495 {
5496 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5497
5498 gfc_trans_vla_type_sizes (sym, &init);
5499
5500 /* Emit a DECL_EXPR for this variable, which will cause the
5501 gimplifier to allocate storage, and all that good stuff. */
5502 tmp = fold_build1_loc (input_location, DECL_EXPR, TREE_TYPE (decl), decl);
5503 gfc_add_expr_to_block (&init, tmp);
5504 }
5505
5506 if (onstack)
5507 {
5508 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
5509 return;
5510 }
5511
5512 type = TREE_TYPE (type);
5513
5514 gcc_assert (!sym->attr.use_assoc);
5515 gcc_assert (!TREE_STATIC (decl));
5516 gcc_assert (!sym->module);
5517
5518 if (sym->ts.type == BT_CHARACTER
5519 && !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
5520 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5521
5522 size = gfc_trans_array_bounds (type, sym, &offset, &init);
5523
5524 /* Don't actually allocate space for Cray Pointees. */
5525 if (sym->attr.cray_pointee)
5526 {
5527 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5528 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5529
5530 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
5531 return;
5532 }
5533
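/* With -fstack-arrays the data lives in a stack-allocated VLA declared
   below; otherwise it is malloc'ed here and freed again by the cleanup
   added to the wrapped block at the end of this function.  */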
5534 if (gfc_option.flag_stack_arrays)
5535 {
5536 gcc_assert (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE);
5537 space = build_decl (sym->declared_at.lb->location,
5538 VAR_DECL, create_tmp_var_name ("A"),
5539 TREE_TYPE (TREE_TYPE (decl)));
5540 gfc_trans_vla_type_sizes (sym, &init);
5541 }
5542 else
5543 {
5544 /* The size is the number of elements in the array, so multiply by the
5545 size of an element to get the total size. */
5546 tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
5547 size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
5548 size, fold_convert (gfc_array_index_type, tmp));
5549
5550 /* Allocate memory to hold the data. */
5551 tmp = gfc_call_malloc (&init, TREE_TYPE (decl), size);
5552 gfc_add_modify (&init, decl, tmp);
5553
5554 /* Free the temporary. */
5555 tmp = gfc_call_free (convert (pvoid_type_node, decl));
5556 space = NULL_TREE;
5557 }
5558
5559 /* Set offset of the array. */
5560 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5561 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5562
5563 /* Automatic arrays should not have initializers. */
5564 gcc_assert (!sym->value);
5565
5566 inittree = gfc_finish_block (&init);
5567
5568 if (space)
5569 {
5570 tree addr;
5571 pushdecl (space);
5572
5573 /* Don't create a new scope; emit the DECL_EXPR in exactly the
5574 scope where space itself is located. */
5575 gfc_init_block (&init);
5576 tmp = fold_build1_loc (input_location, DECL_EXPR,
5577 TREE_TYPE (space), space);
5578 gfc_add_expr_to_block (&init, tmp);
5579 addr = fold_build1_loc (sym->declared_at.lb->location,
5580 ADDR_EXPR, TREE_TYPE (decl), space);
5581 gfc_add_modify (&init, decl, addr);
5582 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
5583 tmp = NULL_TREE;
5584 }
5585 gfc_add_init_cleanup (block, inittree, tmp);
5586 }
5587
5588
5589 /* Generate entry and exit code for g77 calling convention arrays. */
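/* That is, dummy arguments that are passed as a bare data pointer without
   an array descriptor (assumed-size and explicit-shape dummies).  */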
5590
5591 void
5592 gfc_trans_g77_array (gfc_symbol * sym, gfc_wrapped_block * block)
5593 {
5594 tree parm;
5595 tree type;
5596 locus loc;
5597 tree offset;
5598 tree tmp;
5599 tree stmt;
5600 stmtblock_t init;
5601
5602 gfc_save_backend_locus (&loc);
5603 gfc_set_backend_locus (&sym->declared_at);
5604
5605 /* Descriptor type. */
5606 parm = sym->backend_decl;
5607 type = TREE_TYPE (parm);
5608 gcc_assert (GFC_ARRAY_TYPE_P (type));
5609
5610 gfc_start_block (&init);
5611
5612 if (sym->ts.type == BT_CHARACTER
5613 && TREE_CODE (sym->ts.u.cl->backend_decl) == VAR_DECL)
5614 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5615
5616 /* Evaluate the bounds of the array. */
5617 gfc_trans_array_bounds (type, sym, &offset, &init);
5618
5619 /* Set the offset. */
5620 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5621 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5622
5623 /* Set the pointer itself if we aren't using the parameter directly. */
5624 if (TREE_CODE (parm) != PARM_DECL)
5625 {
5626 tmp = convert (TREE_TYPE (parm), GFC_DECL_SAVED_DESCRIPTOR (parm));
5627 gfc_add_modify (&init, parm, tmp);
5628 }
5629 stmt = gfc_finish_block (&init);
5630
5631 gfc_restore_backend_locus (&loc);
5632
5633 /* Add the initialization code to the start of the function. */
5634
5635 if (sym->attr.optional || sym->attr.not_always_present)
5636 {
5637 tmp = gfc_conv_expr_present (sym);
5638 stmt = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt (input_location));
5639 }
5640
5641 gfc_add_init_cleanup (block, stmt, NULL_TREE);
5642 }
5643
5644
5645 /* Modify the descriptor of an array parameter so that it has the
5646 correct lower bound. Also move the upper bound accordingly.
5647 If the array is not packed, it will be copied into a temporary.
5648 For each dimension we set the new lower and upper bounds. Then we copy the
5649 stride and calculate the offset for this dimension. We also work out
5650 what the stride of a packed array would be, and see if the two match.
5651 If the array needs repacking, we set the stride to the values we just
5652 calculated, recalculate the offset and copy the array data.
5653 Code is also added to copy the data back at the end of the function.
5654 */
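/* For illustration (assumed example), with array repacking enabled
   (-frepack-arrays), an assumed-shape dummy that receives a strided
   section may be copied on entry and copied back on exit:

       subroutine bias_example (x)
         real :: x(:)              ! assumed shape - descriptor passed
         x = 2.0 * x
       end subroutine bias_example

       real :: a(10)
       call bias_example (a(1:10:2))   ! non-unit stride

   The run-time library routine only makes the copy if the incoming
   descriptor really describes non-contiguous data.  */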
5655
5656 void
5657 gfc_trans_dummy_array_bias (gfc_symbol * sym, tree tmpdesc,
5658 gfc_wrapped_block * block)
5659 {
5660 tree size;
5661 tree type;
5662 tree offset;
5663 locus loc;
5664 stmtblock_t init;
5665 tree stmtInit, stmtCleanup;
5666 tree lbound;
5667 tree ubound;
5668 tree dubound;
5669 tree dlbound;
5670 tree dumdesc;
5671 tree tmp;
5672 tree stride, stride2;
5673 tree stmt_packed;
5674 tree stmt_unpacked;
5675 tree partial;
5676 gfc_se se;
5677 int n;
5678 int checkparm;
5679 int no_repack;
5680 bool optional_arg;
5681
5682 /* Do nothing for pointer and allocatable arrays. */
5683 if (sym->attr.pointer || sym->attr.allocatable)
5684 return;
5685
5686 if (sym->attr.dummy && gfc_is_nodesc_array (sym))
5687 {
5688 gfc_trans_g77_array (sym, block);
5689 return;
5690 }
5691
5692 gfc_save_backend_locus (&loc);
5693 gfc_set_backend_locus (&sym->declared_at);
5694
5695 /* Descriptor type. */
5696 type = TREE_TYPE (tmpdesc);
5697 gcc_assert (GFC_ARRAY_TYPE_P (type));
5698 dumdesc = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
5699 dumdesc = build_fold_indirect_ref_loc (input_location, dumdesc);
5700 gfc_start_block (&init);
5701
5702 if (sym->ts.type == BT_CHARACTER
5703 && TREE_CODE (sym->ts.u.cl->backend_decl) == VAR_DECL)
5704 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5705
5706 checkparm = (sym->as->type == AS_EXPLICIT
5707 && (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS));
5708
5709 no_repack = !(GFC_DECL_PACKED_ARRAY (tmpdesc)
5710 || GFC_DECL_PARTIAL_PACKED_ARRAY (tmpdesc));
5711
5712 if (GFC_DECL_PARTIAL_PACKED_ARRAY (tmpdesc))
5713 {
5714 /* For non-constant shape arrays we only check if the first dimension
5715 is contiguous. Repacking higher dimensions wouldn't gain us
5716 anything as we still don't know the array stride. */
5717 partial = gfc_create_var (boolean_type_node, "partial");
5718 TREE_USED (partial) = 1;
5719 tmp = gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[0]);
5720 tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, tmp,
5721 gfc_index_one_node);
5722 gfc_add_modify (&init, partial, tmp);
5723 }
5724 else
5725 partial = NULL_TREE;
5726
5727 /* The naming of stmt_unpacked and stmt_packed may be counter-intuitive
5728 here; however, I think it does the right thing. */
5729 if (no_repack)
5730 {
5731 /* Set the first stride. */
5732 stride = gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[0]);
5733 stride = gfc_evaluate_now (stride, &init);
5734
5735 tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
5736 stride, gfc_index_zero_node);
5737 tmp = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type,
5738 tmp, gfc_index_one_node, stride);
5739 stride = GFC_TYPE_ARRAY_STRIDE (type, 0);
5740 gfc_add_modify (&init, stride, tmp);
5741
5742 /* Allow the user to disable array repacking. */
5743 stmt_unpacked = NULL_TREE;
5744 }
5745 else
5746 {
5747 gcc_assert (integer_onep (GFC_TYPE_ARRAY_STRIDE (type, 0)));
5748 /* A library call to repack the array if necessary. */
5749 tmp = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
5750 stmt_unpacked = build_call_expr_loc (input_location,
5751 gfor_fndecl_in_pack, 1, tmp);
5752
5753 stride = gfc_index_one_node;
5754
5755 if (gfc_option.warn_array_temp)
5756 gfc_warning ("Creating array temporary at %L", &loc);
5757 }
5758
5759 /* This is for the case where the array data is used directly without
5760 calling the repack function. */
5761 if (no_repack || partial != NULL_TREE)
5762 stmt_packed = gfc_conv_descriptor_data_get (dumdesc);
5763 else
5764 stmt_packed = NULL_TREE;
5765
5766 /* Assign the data pointer. */
5767 if (stmt_packed != NULL_TREE && stmt_unpacked != NULL_TREE)
5768 {
5769 /* Don't repack unknown shape arrays when the first stride is 1. */
5770 tmp = fold_build3_loc (input_location, COND_EXPR, TREE_TYPE (stmt_packed),
5771 partial, stmt_packed, stmt_unpacked);
5772 }
5773 else
5774 tmp = stmt_packed != NULL_TREE ? stmt_packed : stmt_unpacked;
5775 gfc_add_modify (&init, tmpdesc, fold_convert (type, tmp));
5776
5777 offset = gfc_index_zero_node;
5778 size = gfc_index_one_node;
5779
5780 /* Evaluate the bounds of the array. */
5781 for (n = 0; n < sym->as->rank; n++)
5782 {
5783 if (checkparm || !sym->as->upper[n])
5784 {
5785 /* Get the bounds of the actual parameter. */
5786 dubound = gfc_conv_descriptor_ubound_get (dumdesc, gfc_rank_cst[n]);
5787 dlbound = gfc_conv_descriptor_lbound_get (dumdesc, gfc_rank_cst[n]);
5788 }
5789 else
5790 {
5791 dubound = NULL_TREE;
5792 dlbound = NULL_TREE;
5793 }
5794
5795 lbound = GFC_TYPE_ARRAY_LBOUND (type, n);
5796 if (!INTEGER_CST_P (lbound))
5797 {
5798 gfc_init_se (&se, NULL);
5799 gfc_conv_expr_type (&se, sym->as->lower[n],
5800 gfc_array_index_type);
5801 gfc_add_block_to_block (&init, &se.pre);
5802 gfc_add_modify (&init, lbound, se.expr);
5803 }
5804
5805 ubound = GFC_TYPE_ARRAY_UBOUND (type, n);
5806 /* Set the desired upper bound. */
5807 if (sym->as->upper[n])
5808 {
5809 /* We know what we want the upper bound to be. */
5810 if (!INTEGER_CST_P (ubound))
5811 {
5812 gfc_init_se (&se, NULL);
5813 gfc_conv_expr_type (&se, sym->as->upper[n],
5814 gfc_array_index_type);
5815 gfc_add_block_to_block (&init, &se.pre);
5816 gfc_add_modify (&init, ubound, se.expr);
5817 }
5818
5819 /* Check the sizes match. */
5820 if (checkparm)
5821 {
5822 /* Check (ubound(a) - lbound(a) == ubound(b) - lbound(b)). */
5823 char * msg;
5824 tree temp;
5825
5826 temp = fold_build2_loc (input_location, MINUS_EXPR,
5827 gfc_array_index_type, ubound, lbound);
5828 temp = fold_build2_loc (input_location, PLUS_EXPR,
5829 gfc_array_index_type,
5830 gfc_index_one_node, temp);
5831 stride2 = fold_build2_loc (input_location, MINUS_EXPR,
5832 gfc_array_index_type, dubound,
5833 dlbound);
5834 stride2 = fold_build2_loc (input_location, PLUS_EXPR,
5835 gfc_array_index_type,
5836 gfc_index_one_node, stride2);
5837 tmp = fold_build2_loc (input_location, NE_EXPR,
5838 gfc_array_index_type, temp, stride2);
5839 asprintf (&msg, "Dimension %d of array '%s' has extent "
5840 "%%ld instead of %%ld", n+1, sym->name);
5841
5842 gfc_trans_runtime_check (true, false, tmp, &init, &loc, msg,
5843 fold_convert (long_integer_type_node, temp),
5844 fold_convert (long_integer_type_node, stride2));
5845
5846 free (msg);
5847 }
5848 }
5849 else
5850 {
5851 /* For assumed shape arrays move the upper bound by the same amount
5852 as the lower bound. */
5853 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5854 gfc_array_index_type, dubound, dlbound);
5855 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5856 gfc_array_index_type, tmp, lbound);
5857 gfc_add_modify (&init, ubound, tmp);
5858 }
5859 /* The offset of this dimension. offset = offset - lbound * stride. */
5860 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
5861 lbound, stride);
5862 offset = fold_build2_loc (input_location, MINUS_EXPR,
5863 gfc_array_index_type, offset, tmp);
5864
5865 /* The size of this dimension, and the stride of the next. */
5866 if (n + 1 < sym->as->rank)
5867 {
5868 stride = GFC_TYPE_ARRAY_STRIDE (type, n + 1);
5869
5870 if (no_repack || partial != NULL_TREE)
5871 stmt_unpacked =
5872 gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[n+1]);
5873
5874 /* Figure out the stride if not a known constant. */
5875 if (!INTEGER_CST_P (stride))
5876 {
5877 if (no_repack)
5878 stmt_packed = NULL_TREE;
5879 else
5880 {
5881 /* Calculate stride = size * (ubound + 1 - lbound). */
5882 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5883 gfc_array_index_type,
5884 gfc_index_one_node, lbound);
5885 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5886 gfc_array_index_type, ubound, tmp);
5887 size = fold_build2_loc (input_location, MULT_EXPR,
5888 gfc_array_index_type, size, tmp);
5889 stmt_packed = size;
5890 }
5891
5892 /* Assign the stride. */
5893 if (stmt_packed != NULL_TREE && stmt_unpacked != NULL_TREE)
5894 tmp = fold_build3_loc (input_location, COND_EXPR,
5895 gfc_array_index_type, partial,
5896 stmt_unpacked, stmt_packed);
5897 else
5898 tmp = (stmt_packed != NULL_TREE) ? stmt_packed : stmt_unpacked;
5899 gfc_add_modify (&init, stride, tmp);
5900 }
5901 }
5902 else
5903 {
5904 stride = GFC_TYPE_ARRAY_SIZE (type);
5905
5906 if (stride && !INTEGER_CST_P (stride))
5907 {
5908 /* Calculate size = stride * (ubound + 1 - lbound). */
5909 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5910 gfc_array_index_type,
5911 gfc_index_one_node, lbound);
5912 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5913 gfc_array_index_type,
5914 ubound, tmp);
5915 tmp = fold_build2_loc (input_location, MULT_EXPR,
5916 gfc_array_index_type,
5917 GFC_TYPE_ARRAY_STRIDE (type, n), tmp);
5918 gfc_add_modify (&init, stride, tmp);
5919 }
5920 }
5921 }
5922
5923 gfc_trans_array_cobounds (type, &init, sym);
5924
5925 /* Set the offset. */
5926 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5927 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5928
5929 gfc_trans_vla_type_sizes (sym, &init);
5930
5931 stmtInit = gfc_finish_block (&init);
5932
5933 /* Only do the entry/initialization code if the arg is present. */
5934 dumdesc = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
5935 optional_arg = (sym->attr.optional
5936 || (sym->ns->proc_name->attr.entry_master
5937 && sym->attr.dummy));
5938 if (optional_arg)
5939 {
5940 tmp = gfc_conv_expr_present (sym);
5941 stmtInit = build3_v (COND_EXPR, tmp, stmtInit,
5942 build_empty_stmt (input_location));
5943 }
5944
5945 /* Cleanup code. */
5946 if (no_repack)
5947 stmtCleanup = NULL_TREE;
5948 else
5949 {
5950 stmtblock_t cleanup;
5951 gfc_start_block (&cleanup);
5952
5953 if (sym->attr.intent != INTENT_IN)
5954 {
5955 /* Copy the data back. */
5956 tmp = build_call_expr_loc (input_location,
5957 gfor_fndecl_in_unpack, 2, dumdesc, tmpdesc);
5958 gfc_add_expr_to_block (&cleanup, tmp);
5959 }
5960
5961 /* Free the temporary. */
5962 tmp = gfc_call_free (tmpdesc);
5963 gfc_add_expr_to_block (&cleanup, tmp);
5964
5965 stmtCleanup = gfc_finish_block (&cleanup);
5966
5967 /* Only do the cleanup if the array was repacked. */
5968 tmp = build_fold_indirect_ref_loc (input_location, dumdesc);
5969 tmp = gfc_conv_descriptor_data_get (tmp);
5970 tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
5971 tmp, tmpdesc);
5972 stmtCleanup = build3_v (COND_EXPR, tmp, stmtCleanup,
5973 build_empty_stmt (input_location));
5974
5975 if (optional_arg)
5976 {
5977 tmp = gfc_conv_expr_present (sym);
5978 stmtCleanup = build3_v (COND_EXPR, tmp, stmtCleanup,
5979 build_empty_stmt (input_location));
5980 }
5981 }
5982
5983 /* We don't need to free any memory allocated by internal_pack as it will
5984 be freed at the end of the function by pop_context. */
5985 gfc_add_init_cleanup (block, stmtInit, stmtCleanup);
5986
5987 gfc_restore_backend_locus (&loc);
5988 }
5989
5990
5991 /* Calculate the overall offset, including subreferences. */
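/* An illustrative case (assumed, not from the testsuite):

       type t
         integer :: before
         real    :: r(5)
       end type t
       type(t), target :: d(10)
       real, pointer   :: p(:)

       p => d(:)%r(3)

   Here the data pointer of P must point at D(1)%R(3), so the leading
   array reference is followed by a component reference and an element
   reference, each contributing to the overall offset computed below.  */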
5992 static void
5993 gfc_get_dataptr_offset (stmtblock_t *block, tree parm, tree desc, tree offset,
5994 bool subref, gfc_expr *expr)
5995 {
5996 tree tmp;
5997 tree field;
5998 tree stride;
5999 tree index;
6000 gfc_ref *ref;
6001 gfc_se start;
6002 int n;
6003
6004 /* If offset is NULL and this is not a subreferenced array, there is
6005 nothing to do. */
6006 if (offset == NULL_TREE)
6007 {
6008 if (subref)
6009 offset = gfc_index_zero_node;
6010 else
6011 return;
6012 }
6013
6014 tmp = gfc_conv_array_data (desc);
6015 tmp = build_fold_indirect_ref_loc (input_location,
6016 tmp);
6017 tmp = gfc_build_array_ref (tmp, offset, NULL);
6018
6019 /* Offset the data pointer for pointer assignments from arrays with
6020 subreferences; e.g. my_integer => my_type(:)%integer_component. */
6021 if (subref)
6022 {
6023 /* Go past the array reference. */
6024 for (ref = expr->ref; ref; ref = ref->next)
6025 if (ref->type == REF_ARRAY &&
6026 ref->u.ar.type != AR_ELEMENT)
6027 {
6028 ref = ref->next;
6029 break;
6030 }
6031
6032 /* Calculate the offset for each subsequent subreference. */
6033 for (; ref; ref = ref->next)
6034 {
6035 switch (ref->type)
6036 {
6037 case REF_COMPONENT:
6038 field = ref->u.c.component->backend_decl;
6039 gcc_assert (field && TREE_CODE (field) == FIELD_DECL);
6040 tmp = fold_build3_loc (input_location, COMPONENT_REF,
6041 TREE_TYPE (field),
6042 tmp, field, NULL_TREE);
6043 break;
6044
6045 case REF_SUBSTRING:
6046 gcc_assert (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE);
6047 gfc_init_se (&start, NULL);
6048 gfc_conv_expr_type (&start, ref->u.ss.start, gfc_charlen_type_node);
6049 gfc_add_block_to_block (block, &start.pre);
6050 tmp = gfc_build_array_ref (tmp, start.expr, NULL);
6051 break;
6052
6053 case REF_ARRAY:
6054 gcc_assert (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE
6055 && ref->u.ar.type == AR_ELEMENT);
6056
6057 /* TODO - Add bounds checking. */
6058 stride = gfc_index_one_node;
6059 index = gfc_index_zero_node;
6060 for (n = 0; n < ref->u.ar.dimen; n++)
6061 {
6062 tree itmp;
6063 tree jtmp;
6064
6065 /* Update the index. */
6066 gfc_init_se (&start, NULL);
6067 gfc_conv_expr_type (&start, ref->u.ar.start[n], gfc_array_index_type);
6068 itmp = gfc_evaluate_now (start.expr, block);
6069 gfc_init_se (&start, NULL);
6070 gfc_conv_expr_type (&start, ref->u.ar.as->lower[n], gfc_array_index_type);
6071 jtmp = gfc_evaluate_now (start.expr, block);
6072 itmp = fold_build2_loc (input_location, MINUS_EXPR,
6073 gfc_array_index_type, itmp, jtmp);
6074 itmp = fold_build2_loc (input_location, MULT_EXPR,
6075 gfc_array_index_type, itmp, stride);
6076 index = fold_build2_loc (input_location, PLUS_EXPR,
6077 gfc_array_index_type, itmp, index);
6078 index = gfc_evaluate_now (index, block);
6079
6080 /* Update the stride. */
6081 gfc_init_se (&start, NULL);
6082 gfc_conv_expr_type (&start, ref->u.ar.as->upper[n], gfc_array_index_type);
6083 itmp = fold_build2_loc (input_location, MINUS_EXPR,
6084 gfc_array_index_type, start.expr,
6085 jtmp);
6086 itmp = fold_build2_loc (input_location, PLUS_EXPR,
6087 gfc_array_index_type,
6088 gfc_index_one_node, itmp);
6089 stride = fold_build2_loc (input_location, MULT_EXPR,
6090 gfc_array_index_type, stride, itmp);
6091 stride = gfc_evaluate_now (stride, block);
6092 }
6093
6094 /* Apply the index to obtain the array element. */
6095 tmp = gfc_build_array_ref (tmp, index, NULL);
6096 break;
6097
6098 default:
6099 gcc_unreachable ();
6100 break;
6101 }
6102 }
6103 }
6104
6105 /* Set the target data pointer. */
6106 offset = gfc_build_addr_expr (gfc_array_dataptr_type (desc), tmp);
6107 gfc_conv_descriptor_data_set (block, parm, offset);
6108 }
6109
6110
6111 /* gfc_conv_expr_descriptor needs the string length of an expression
6112 so that the size of the temporary can be obtained. This is done
6113 by adding up the string lengths of all the elements in the
6114 expression. Functions with non-constant string lengths have their
6115 string lengths mapped onto the actual arguments using the
6116 interface mapping machinery in trans-expr.c. */
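/* A minimal illustration (assumed example):

       subroutine show (s)
         character(len=*) :: s(:)
         print *, s
       end subroutine show

       character(len=n) :: names(3)
       call show (names // '_x')

   The temporary holding NAMES // '_x' has element length LEN (NAMES) + 2,
   which is only known at run time; it is obtained here by adding the
   string lengths of the two operands of the concatenation.  */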
6117 static void
6118 get_array_charlen (gfc_expr *expr, gfc_se *se)
6119 {
6120 gfc_interface_mapping mapping;
6121 gfc_formal_arglist *formal;
6122 gfc_actual_arglist *arg;
6123 gfc_se tse;
6124
6125 if (expr->ts.u.cl->length
6126 && gfc_is_constant_expr (expr->ts.u.cl->length))
6127 {
6128 if (!expr->ts.u.cl->backend_decl)
6129 gfc_conv_string_length (expr->ts.u.cl, expr, &se->pre);
6130 return;
6131 }
6132
6133 switch (expr->expr_type)
6134 {
6135 case EXPR_OP:
6136 get_array_charlen (expr->value.op.op1, se);
6137
6138 /* For parentheses the expression ts.u.cl is identical. */
6139 if (expr->value.op.op == INTRINSIC_PARENTHESES)
6140 return;
6141
6142 expr->ts.u.cl->backend_decl =
6143 gfc_create_var (gfc_charlen_type_node, "sln");
6144
6145 if (expr->value.op.op2)
6146 {
6147 get_array_charlen (expr->value.op.op2, se);
6148
6149 gcc_assert (expr->value.op.op == INTRINSIC_CONCAT);
6150
6151 /* Add the string lengths and assign them to the expression
6152 string length backend declaration. */
6153 gfc_add_modify (&se->pre, expr->ts.u.cl->backend_decl,
6154 fold_build2_loc (input_location, PLUS_EXPR,
6155 gfc_charlen_type_node,
6156 expr->value.op.op1->ts.u.cl->backend_decl,
6157 expr->value.op.op2->ts.u.cl->backend_decl));
6158 }
6159 else
6160 gfc_add_modify (&se->pre, expr->ts.u.cl->backend_decl,
6161 expr->value.op.op1->ts.u.cl->backend_decl);
6162 break;
6163
6164 case EXPR_FUNCTION:
6165 if (expr->value.function.esym == NULL
6166 || expr->ts.u.cl->length->expr_type == EXPR_CONSTANT)
6167 {
6168 gfc_conv_string_length (expr->ts.u.cl, expr, &se->pre);
6169 break;
6170 }
6171
6172 /* Map expressions involving the dummy arguments onto the actual
6173 argument expressions. */
6174 gfc_init_interface_mapping (&mapping);
6175 formal = expr->symtree->n.sym->formal;
6176 arg = expr->value.function.actual;
6177
6178 /* Set se = NULL in the calls to the interface mapping, to suppress any
6179 backend stuff. */
6180 for (; arg != NULL; arg = arg->next, formal = formal ? formal->next : NULL)
6181 {
6182 if (!arg->expr)
6183 continue;
6184 if (formal->sym)
6185 gfc_add_interface_mapping (&mapping, formal->sym, NULL, arg->expr);
6186 }
6187
6188 gfc_init_se (&tse, NULL);
6189
6190 /* Build the expression for the character length and convert it. */
6191 gfc_apply_interface_mapping (&mapping, &tse, expr->ts.u.cl->length);
6192
6193 gfc_add_block_to_block (&se->pre, &tse.pre);
6194 gfc_add_block_to_block (&se->post, &tse.post);
6195 tse.expr = fold_convert (gfc_charlen_type_node, tse.expr);
6196 tse.expr = fold_build2_loc (input_location, MAX_EXPR,
6197 gfc_charlen_type_node, tse.expr,
6198 build_int_cst (gfc_charlen_type_node, 0));
6199 expr->ts.u.cl->backend_decl = tse.expr;
6200 gfc_free_interface_mapping (&mapping);
6201 break;
6202
6203 default:
6204 gfc_conv_string_length (expr->ts.u.cl, expr, &se->pre);
6205 break;
6206 }
6207 }
6208
6209
6210 /* Helper function to check whether the scalarizer dimensions are permuted (transposed). */
6211 static bool
6212 transposed_dims (gfc_ss *ss)
6213 {
6214 int n;
6215
6216 for (n = 0; n < ss->dimen; n++)
6217 if (ss->dim[n] != n)
6218 return true;
6219 return false;
6220 }
6221
6222 /* Convert an array for passing as an actual argument. Expressions and
6223 vector subscripts are evaluated and stored in a temporary, which is then
6224 passed. For whole arrays the descriptor is passed. For array sections
6225 a modified copy of the descriptor is passed, but using the original data.
6226
6227 This function is also used for array pointer assignments, and there
6228 are three cases:
6229
6230 - se->want_pointer && !se->direct_byref
6231 EXPR is an actual argument. On exit, se->expr contains a
6232 pointer to the array descriptor.
6233
6234 - !se->want_pointer && !se->direct_byref
6235 EXPR is an actual argument to an intrinsic function or the
6236 left-hand side of a pointer assignment. On exit, se->expr
6237 contains the descriptor for EXPR.
6238
6239 - !se->want_pointer && se->direct_byref
6240 EXPR is the right-hand side of a pointer assignment and
6241 se->expr is the descriptor for the previously-evaluated
6242 left-hand side. The function creates an assignment from
6243 EXPR to se->expr.
6244
6245
6246 The se->force_tmp flag disables the non-copying descriptor optimization
6247 that is used for transpose. It may be used in cases where there is an
6248 alias between the transpose argument and another argument in the same
6249 function call. */
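/* A few assumed examples of the cases handled below:

       real, target  :: a(10, 10)
       real, pointer :: p(:)

       call sub (a)                ! whole array: descriptor passed as is
       call sub (a(2:9, 3))        ! section: new descriptor, same data
       call sub (a(2:9, 3) + 1.0)  ! expression: copied into a temporary
       p => a(1:9:2, 5)            ! pointer assignment: se->direct_byref  */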
6250
6251 void
6252 gfc_conv_expr_descriptor (gfc_se * se, gfc_expr * expr, gfc_ss * ss)
6253 {
6254 gfc_ss_type ss_type;
6255 gfc_ss_info *ss_info;
6256 gfc_loopinfo loop;
6257 gfc_array_info *info;
6258 int need_tmp;
6259 int n;
6260 tree tmp;
6261 tree desc;
6262 stmtblock_t block;
6263 tree start;
6264 tree offset;
6265 int full;
6266 bool subref_array_target = false;
6267 gfc_expr *arg, *ss_expr;
6268
6269 gcc_assert (ss != NULL);
6270 gcc_assert (ss != gfc_ss_terminator);
6271
6272 ss_info = ss->info;
6273 ss_type = ss_info->type;
6274 ss_expr = ss_info->expr;
6275
6276 /* Special case things we know we can pass easily. */
6277 switch (expr->expr_type)
6278 {
6279 case EXPR_VARIABLE:
6280 /* If we have a linear array section, we can pass it directly.
6281 Otherwise we need to copy it into a temporary. */
6282
6283 gcc_assert (ss_type == GFC_SS_SECTION);
6284 gcc_assert (ss_expr == expr);
6285 info = &ss_info->data.array;
6286
6287 /* Get the descriptor for the array. */
6288 gfc_conv_ss_descriptor (&se->pre, ss, 0);
6289 desc = info->descriptor;
6290
6291 subref_array_target = se->direct_byref && is_subref_array (expr);
6292 need_tmp = gfc_ref_needs_temporary_p (expr->ref)
6293 && !subref_array_target;
6294
6295 if (se->force_tmp)
6296 need_tmp = 1;
6297
6298 if (need_tmp)
6299 full = 0;
6300 else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6301 {
6302 /* Create a new descriptor if the array doesn't have one. */
6303 full = 0;
6304 }
6305 else if (info->ref->u.ar.type == AR_FULL)
6306 full = 1;
6307 else if (se->direct_byref)
6308 full = 0;
6309 else
6310 full = gfc_full_array_ref_p (info->ref, NULL);
6311
6312 if (full && !transposed_dims (ss))
6313 {
6314 if (se->direct_byref && !se->byref_noassign)
6315 {
6316 /* Copy the descriptor for pointer assignments. */
6317 gfc_add_modify (&se->pre, se->expr, desc);
6318
6319 /* Add any offsets from subreferences. */
6320 gfc_get_dataptr_offset (&se->pre, se->expr, desc, NULL_TREE,
6321 subref_array_target, expr);
6322 }
6323 else if (se->want_pointer)
6324 {
6325 /* We pass full arrays directly. This means that pointers and
6326 allocatable arrays should also work. */
6327 se->expr = gfc_build_addr_expr (NULL_TREE, desc);
6328 }
6329 else
6330 {
6331 se->expr = desc;
6332 }
6333
6334 if (expr->ts.type == BT_CHARACTER)
6335 se->string_length = gfc_get_expr_charlen (expr);
6336
6337 return;
6338 }
6339 break;
6340
6341 case EXPR_FUNCTION:
6342
6343 /* We don't need to copy data in some cases. */
6344 arg = gfc_get_noncopying_intrinsic_argument (expr);
6345 if (arg)
6346 {
6347 /* This is a call to transpose... */
6348 gcc_assert (expr->value.function.isym->id == GFC_ISYM_TRANSPOSE);
6349 /* ... which has already been handled by the scalarizer, so
6350 that we just need to get its argument's descriptor. */
6351 gfc_conv_expr_descriptor (se, expr->value.function.actual->expr, ss);
6352 return;
6353 }
6354
6355 /* A transformational function return value will be a temporary
6356 array descriptor. We still need to go through the scalarizer
6357 to create the descriptor. Elemental functions are handled as
6358 arbitrary expressions, i.e. copied to a temporary. */
6359
6360 if (se->direct_byref)
6361 {
6362 gcc_assert (ss_type == GFC_SS_FUNCTION && ss_expr == expr);
6363
6364 /* For pointer assignments pass the descriptor directly. */
6365 if (se->ss == NULL)
6366 se->ss = ss;
6367 else
6368 gcc_assert (se->ss == ss);
6369 se->expr = gfc_build_addr_expr (NULL_TREE, se->expr);
6370 gfc_conv_expr (se, expr);
6371 return;
6372 }
6373
6374 if (ss_expr != expr || ss_type != GFC_SS_FUNCTION)
6375 {
6376 if (ss_expr != expr)
6377 /* Elemental function. */
6378 gcc_assert ((expr->value.function.esym != NULL
6379 && expr->value.function.esym->attr.elemental)
6380 || (expr->value.function.isym != NULL
6381 && expr->value.function.isym->elemental)
6382 || gfc_inline_intrinsic_function_p (expr));
6383 else
6384 gcc_assert (ss_type == GFC_SS_INTRINSIC);
6385
6386 need_tmp = 1;
6387 if (expr->ts.type == BT_CHARACTER
6388 && expr->ts.u.cl->length->expr_type != EXPR_CONSTANT)
6389 get_array_charlen (expr, se);
6390
6391 info = NULL;
6392 }
6393 else
6394 {
6395 /* Transformational function. */
6396 info = &ss_info->data.array;
6397 need_tmp = 0;
6398 }
6399 break;
6400
6401 case EXPR_ARRAY:
6402 /* Constant array constructors don't need a temporary. */
6403 if (ss_type == GFC_SS_CONSTRUCTOR
6404 && expr->ts.type != BT_CHARACTER
6405 && gfc_constant_array_constructor_p (expr->value.constructor))
6406 {
6407 need_tmp = 0;
6408 info = &ss_info->data.array;
6409 }
6410 else
6411 {
6412 need_tmp = 1;
6413 info = NULL;
6414 }
6415 break;
6416
6417 default:
6418 /* Something complicated. Copy it into a temporary. */
6419 need_tmp = 1;
6420 info = NULL;
6421 break;
6422 }
6423
6424 /* If we are creating a temporary, we don't need to bother about aliases
6425 anymore. */
6426 if (need_tmp)
6427 se->force_tmp = 0;
6428
6429 gfc_init_loopinfo (&loop);
6430
6431 /* Associate the SS with the loop. */
6432 gfc_add_ss_to_loop (&loop, ss);
6433
6434 /* Tell the scalarizer not to bother creating loop variables, etc. */
6435 if (!need_tmp)
6436 loop.array_parameter = 1;
6437 else
6438 /* The right-hand side of a pointer assignment mustn't use a temporary. */
6439 gcc_assert (!se->direct_byref);
6440
6441 /* Setup the scalarizing loops and bounds. */
6442 gfc_conv_ss_startstride (&loop);
6443
6444 if (need_tmp)
6445 {
6446 if (expr->ts.type == BT_CHARACTER && !expr->ts.u.cl->backend_decl)
6447 get_array_charlen (expr, se);
6448
6449 /* Tell the scalarizer to make a temporary. */
6450 loop.temp_ss = gfc_get_temp_ss (gfc_typenode_for_spec (&expr->ts),
6451 ((expr->ts.type == BT_CHARACTER)
6452 ? expr->ts.u.cl->backend_decl
6453 : NULL),
6454 loop.dimen);
6455
6456 se->string_length = loop.temp_ss->info->string_length;
6457 gcc_assert (loop.temp_ss->dimen == loop.dimen);
6458 gfc_add_ss_to_loop (&loop, loop.temp_ss);
6459 }
6460
6461 gfc_conv_loop_setup (&loop, & expr->where);
6462
6463 if (need_tmp)
6464 {
6465 /* Copy into a temporary and pass that. We don't need to copy the data
6466 back because expressions and vector subscripts must be INTENT_IN. */
6467 /* TODO: Optimize passing function return values. */
6468 gfc_se lse;
6469 gfc_se rse;
6470
6471 /* Start the copying loops. */
6472 gfc_mark_ss_chain_used (loop.temp_ss, 1);
6473 gfc_mark_ss_chain_used (ss, 1);
6474 gfc_start_scalarized_body (&loop, &block);
6475
6476 /* Copy each data element. */
6477 gfc_init_se (&lse, NULL);
6478 gfc_copy_loopinfo_to_se (&lse, &loop);
6479 gfc_init_se (&rse, NULL);
6480 gfc_copy_loopinfo_to_se (&rse, &loop);
6481
6482 lse.ss = loop.temp_ss;
6483 rse.ss = ss;
6484
6485 gfc_conv_scalarized_array_ref (&lse, NULL);
6486 if (expr->ts.type == BT_CHARACTER)
6487 {
6488 gfc_conv_expr (&rse, expr);
6489 if (POINTER_TYPE_P (TREE_TYPE (rse.expr)))
6490 rse.expr = build_fold_indirect_ref_loc (input_location,
6491 rse.expr);
6492 }
6493 else
6494 gfc_conv_expr_val (&rse, expr);
6495
6496 gfc_add_block_to_block (&block, &rse.pre);
6497 gfc_add_block_to_block (&block, &lse.pre);
6498
6499 lse.string_length = rse.string_length;
6500 tmp = gfc_trans_scalar_assign (&lse, &rse, expr->ts, true,
6501 expr->expr_type == EXPR_VARIABLE
6502 || expr->expr_type == EXPR_ARRAY, true);
6503 gfc_add_expr_to_block (&block, tmp);
6504
6505 /* Finish the copying loops. */
6506 gfc_trans_scalarizing_loops (&loop, &block);
6507
6508 desc = loop.temp_ss->info->data.array.descriptor;
6509 }
6510 else if (expr->expr_type == EXPR_FUNCTION && !transposed_dims (ss))
6511 {
6512 desc = info->descriptor;
6513 se->string_length = ss_info->string_length;
6514 }
6515 else
6516 {
6517 /* We pass sections without copying to a temporary. Make a new
6518 descriptor and point it at the section we want. The loop variable
6519 limits will be the limits of the section.
6520 A function may decide to repack the array to speed up access, but
6521 we're not bothered about that here. */
6522 int dim, ndim, codim;
6523 tree parm;
6524 tree parmtype;
6525 tree stride;
6526 tree from;
6527 tree to;
6528 tree base;
6529
6530 ndim = info->ref ? info->ref->u.ar.dimen : ss->dimen;
6531
6532 if (se->want_coarray)
6533 {
6534 gfc_array_ref *ar = &info->ref->u.ar;
6535
6536 codim = gfc_get_corank (expr);
6537 for (n = 0; n < codim - 1; n++)
6538 {
6539 /* Make sure we are not lost somehow. */
6540 gcc_assert (ar->dimen_type[n + ndim] == DIMEN_THIS_IMAGE);
6541
6542 /* Make sure the call to gfc_conv_section_startstride won't
6543 generate unnecessary code to calculate stride. */
6544 gcc_assert (ar->stride[n + ndim] == NULL);
6545
6546 gfc_conv_section_startstride (&loop, ss, n + ndim);
6547 loop.from[n + loop.dimen] = info->start[n + ndim];
6548 loop.to[n + loop.dimen] = info->end[n + ndim];
6549 }
6550
6551 gcc_assert (n == codim - 1);
6552 evaluate_bound (&loop.pre, info->start, ar->start,
6553 info->descriptor, n + ndim, true);
6554 loop.from[n + loop.dimen] = info->start[n + ndim];
6555 }
6556 else
6557 codim = 0;
6558
6559 /* Set the string_length for a character array. */
6560 if (expr->ts.type == BT_CHARACTER)
6561 se->string_length = gfc_get_expr_charlen (expr);
6562
6563 desc = info->descriptor;
6564 if (se->direct_byref && !se->byref_noassign)
6565 {
6566 /* For pointer assignments we fill in the destination. */
6567 parm = se->expr;
6568 parmtype = TREE_TYPE (parm);
6569 }
6570 else
6571 {
6572 /* Otherwise make a new one. */
6573 parmtype = gfc_get_element_type (TREE_TYPE (desc));
6574 parmtype = gfc_get_array_type_bounds (parmtype, loop.dimen, codim,
6575 loop.from, loop.to, 0,
6576 GFC_ARRAY_UNKNOWN, false);
6577 parm = gfc_create_var (parmtype, "parm");
6578 }
6579
6580 offset = gfc_index_zero_node;
6581
6582 /* The following can be somewhat confusing. We have two
6583 descriptors, a new one and the original array.
6584 {parm, parmtype, dim} refer to the new one.
6585 {desc, type, n, loop} refer to the original, which maybe
6586 a descriptorless array.
6587 The bounds of the scalarization are the bounds of the section.
6588 We don't have to worry about numeric overflows when calculating
6589 the offsets because all elements are within the array data. */
6590
6591 /* Set the dtype. */
6592 tmp = gfc_conv_descriptor_dtype (parm);
6593 gfc_add_modify (&loop.pre, tmp, gfc_get_dtype (parmtype));
6594
6595 /* For pointer assignments, start the offset (base) at zero only if the
6596 reference is not to the full array. */
6597 if (se->direct_byref
6598 && info->ref && info->ref->u.ar.type != AR_FULL)
6599 base = gfc_index_zero_node;
6600 else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6601 base = gfc_evaluate_now (gfc_conv_array_offset (desc), &loop.pre);
6602 else
6603 base = NULL_TREE;
6604
6605 for (n = 0; n < ndim; n++)
6606 {
6607 stride = gfc_conv_array_stride (desc, n);
6608
6609 /* Work out the offset. */
6610 if (info->ref
6611 && info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
6612 {
6613 gcc_assert (info->subscript[n]
6614 && info->subscript[n]->info->type == GFC_SS_SCALAR);
6615 start = info->subscript[n]->info->data.scalar.value;
6616 }
6617 else
6618 {
6619 /* Evaluate and remember the start of the section. */
6620 start = info->start[n];
6621 stride = gfc_evaluate_now (stride, &loop.pre);
6622 }
6623
6624 tmp = gfc_conv_array_lbound (desc, n);
6625 tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (tmp),
6626 start, tmp);
6627 tmp = fold_build2_loc (input_location, MULT_EXPR, TREE_TYPE (tmp),
6628 tmp, stride);
6629 offset = fold_build2_loc (input_location, PLUS_EXPR, TREE_TYPE (tmp),
6630 offset, tmp);
6631
6632 if (info->ref
6633 && info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
6634 {
6635 /* For elemental dimensions, we only need the offset. */
6636 continue;
6637 }
6638
6639 /* Vector subscripts need copying and are handled elsewhere. */
6640 if (info->ref)
6641 gcc_assert (info->ref->u.ar.dimen_type[n] == DIMEN_RANGE);
6642
6643 /* Look for the corresponding scalarizer dimension: dim. */
6644 for (dim = 0; dim < ndim; dim++)
6645 if (ss->dim[dim] == n)
6646 break;
6647
6648 /* Loop exited early: the DIM being looked for has been found. */
6649 gcc_assert (dim < ndim);
6650
6651 /* Set the new lower bound. */
6652 from = loop.from[dim];
6653 to = loop.to[dim];
6654
6655 /* If we have an array section or are assigning, make sure that
6656 the lower bound is 1. References to the full
6657 array should otherwise keep the original bounds. */
6658 if ((!info->ref
6659 || info->ref->u.ar.type != AR_FULL)
6660 && !integer_onep (from))
6661 {
6662 tmp = fold_build2_loc (input_location, MINUS_EXPR,
6663 gfc_array_index_type, gfc_index_one_node,
6664 from);
6665 to = fold_build2_loc (input_location, PLUS_EXPR,
6666 gfc_array_index_type, to, tmp);
6667 from = gfc_index_one_node;
6668 }
6669 gfc_conv_descriptor_lbound_set (&loop.pre, parm,
6670 gfc_rank_cst[dim], from);
6671
6672 /* Set the new upper bound. */
6673 gfc_conv_descriptor_ubound_set (&loop.pre, parm,
6674 gfc_rank_cst[dim], to);
6675
6676 /* Multiply the stride by the section stride to get the
6677 total stride. */
6678 stride = fold_build2_loc (input_location, MULT_EXPR,
6679 gfc_array_index_type,
6680 stride, info->stride[n]);
6681
6682 if (se->direct_byref
6683 && info->ref
6684 && info->ref->u.ar.type != AR_FULL)
6685 {
6686 base = fold_build2_loc (input_location, MINUS_EXPR,
6687 TREE_TYPE (base), base, stride);
6688 }
6689 else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6690 {
6691 tmp = gfc_conv_array_lbound (desc, n);
6692 tmp = fold_build2_loc (input_location, MINUS_EXPR,
6693 TREE_TYPE (base), tmp, loop.from[dim]);
6694 tmp = fold_build2_loc (input_location, MULT_EXPR,
6695 TREE_TYPE (base), tmp,
6696 gfc_conv_array_stride (desc, n));
6697 base = fold_build2_loc (input_location, PLUS_EXPR,
6698 TREE_TYPE (base), tmp, base);
6699 }
6700
6701 /* Store the new stride. */
6702 gfc_conv_descriptor_stride_set (&loop.pre, parm,
6703 gfc_rank_cst[dim], stride);
6704 }
6705
6706 for (n = loop.dimen; n < loop.dimen + codim; n++)
6707 {
6708 from = loop.from[n];
6709 to = loop.to[n];
6710 gfc_conv_descriptor_lbound_set (&loop.pre, parm,
6711 gfc_rank_cst[n], from);
6712 if (n < loop.dimen + codim - 1)
6713 gfc_conv_descriptor_ubound_set (&loop.pre, parm,
6714 gfc_rank_cst[n], to);
6715 }
6716
6717 if (se->data_not_needed)
6718 gfc_conv_descriptor_data_set (&loop.pre, parm,
6719 gfc_index_zero_node);
6720 else
6721 /* Point the data pointer at the 1st element in the section. */
6722 gfc_get_dataptr_offset (&loop.pre, parm, desc, offset,
6723 subref_array_target, expr);
6724
6725 if ((se->direct_byref || GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6726 && !se->data_not_needed)
6727 {
6728 /* Set the offset. */
6729 gfc_conv_descriptor_offset_set (&loop.pre, parm, base);
6730 }
6731 else
6732 {
6733 /* Only the callee knows what the correct offset is, so just set
6734 it to zero here. */
6735 gfc_conv_descriptor_offset_set (&loop.pre, parm, gfc_index_zero_node);
6736 }
6737 desc = parm;
6738 }
6739
6740 if (!se->direct_byref || se->byref_noassign)
6741 {
6742 /* Get a pointer to the new descriptor. */
6743 if (se->want_pointer)
6744 se->expr = gfc_build_addr_expr (NULL_TREE, desc);
6745 else
6746 se->expr = desc;
6747 }
6748
6749 gfc_add_block_to_block (&se->pre, &loop.pre);
6750 gfc_add_block_to_block (&se->post, &loop.post);
6751
6752 /* Cleanup the scalarizer. */
6753 gfc_cleanup_loop (&loop);
6754 }
6755
6756 /* Helper function for gfc_conv_array_parameter if array size needs to be
6757 computed. */
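/* For a rank-1 descriptor the value computed below is, in bytes,

       size = MAX (ubound - lbound + 1, 0) * sizeof (element)

   while higher-rank arrays obtain the element count from the library
   function size0 and descriptorless arrays use the size recorded in
   the type.  */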
6758
6759 static void
6760 array_parameter_size (tree desc, gfc_expr *expr, tree *size)
6761 {
6762 tree elem;
6763 if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6764 *size = GFC_TYPE_ARRAY_SIZE (TREE_TYPE (desc));
6765 else if (expr->rank > 1)
6766 *size = build_call_expr_loc (input_location,
6767 gfor_fndecl_size0, 1,
6768 gfc_build_addr_expr (NULL, desc));
6769 else
6770 {
6771 tree ubound = gfc_conv_descriptor_ubound_get (desc, gfc_index_zero_node);
6772 tree lbound = gfc_conv_descriptor_lbound_get (desc, gfc_index_zero_node);
6773
6774 *size = fold_build2_loc (input_location, MINUS_EXPR,
6775 gfc_array_index_type, ubound, lbound);
6776 *size = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
6777 *size, gfc_index_one_node);
6778 *size = fold_build2_loc (input_location, MAX_EXPR, gfc_array_index_type,
6779 *size, gfc_index_zero_node);
6780 }
6781 elem = TYPE_SIZE_UNIT (gfc_get_element_type (TREE_TYPE (desc)));
6782 *size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
6783 *size, fold_convert (gfc_array_index_type, elem));
6784 }
6785
6786 /* Convert an array for passing as an actual parameter. */
6787 /* TODO: Optimize passing g77 arrays. */
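/* An assumed example of the packing this function may generate: passing a
   strided section to a procedure with an implicit interface

       real :: a(100)
       external old_sub
       call old_sub (a(1:100:10))

   copies the section into a contiguous temporary (_gfortran_internal_pack)
   before the call and copies it back (_gfortran_internal_unpack) afterwards,
   whereas passing the whole array A just passes its address.  */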
6788
6789 void
6790 gfc_conv_array_parameter (gfc_se * se, gfc_expr * expr, gfc_ss * ss, bool g77,
6791 const gfc_symbol *fsym, const char *proc_name,
6792 tree *size)
6793 {
6794 tree ptr;
6795 tree desc;
6796 tree tmp = NULL_TREE;
6797 tree stmt;
6798 tree parent = DECL_CONTEXT (current_function_decl);
6799 bool full_array_var;
6800 bool this_array_result;
6801 bool contiguous;
6802 bool no_pack;
6803 bool array_constructor;
6804 bool good_allocatable;
6805 bool ultimate_ptr_comp;
6806 bool ultimate_alloc_comp;
6807 gfc_symbol *sym;
6808 stmtblock_t block;
6809 gfc_ref *ref;
6810
6811 ultimate_ptr_comp = false;
6812 ultimate_alloc_comp = false;
6813
6814 for (ref = expr->ref; ref; ref = ref->next)
6815 {
6816 if (ref->next == NULL)
6817 break;
6818
6819 if (ref->type == REF_COMPONENT)
6820 {
6821 ultimate_ptr_comp = ref->u.c.component->attr.pointer;
6822 ultimate_alloc_comp = ref->u.c.component->attr.allocatable;
6823 }
6824 }
6825
6826 full_array_var = false;
6827 contiguous = false;
6828
6829 if (expr->expr_type == EXPR_VARIABLE && ref && !ultimate_ptr_comp)
6830 full_array_var = gfc_full_array_ref_p (ref, &contiguous);
6831
6832 sym = full_array_var ? expr->symtree->n.sym : NULL;
6833
6834 /* The symbol should have an array specification. */
6835 gcc_assert (!sym || sym->as || ref->u.ar.as);
6836
6837 if (expr->expr_type == EXPR_ARRAY && expr->ts.type == BT_CHARACTER)
6838 {
6839 get_array_ctor_strlen (&se->pre, expr->value.constructor, &tmp);
6840 expr->ts.u.cl->backend_decl = tmp;
6841 se->string_length = tmp;
6842 }
6843
6844 /* Is this the result of the enclosing procedure? */
6845 this_array_result = (full_array_var && sym->attr.flavor == FL_PROCEDURE);
6846 if (this_array_result
6847 && (sym->backend_decl != current_function_decl)
6848 && (sym->backend_decl != parent))
6849 this_array_result = false;
6850
6851 /* Pass the address of the array if it is not a pointer or assumed-shape. */
6852 if (full_array_var && g77 && !this_array_result)
6853 {
6854 tmp = gfc_get_symbol_decl (sym);
6855
6856 if (sym->ts.type == BT_CHARACTER)
6857 se->string_length = sym->ts.u.cl->backend_decl;
6858
6859 if (sym->ts.type == BT_DERIVED || sym->ts.type == BT_CLASS)
6860 {
6861 gfc_conv_expr_descriptor (se, expr, ss);
6862 se->expr = gfc_conv_array_data (se->expr);
6863 return;
6864 }
6865
6866 if (!sym->attr.pointer
6867 && sym->as
6868 && sym->as->type != AS_ASSUMED_SHAPE
6869 && !sym->attr.allocatable)
6870 {
6871 /* Some variables are declared directly, others are declared as
6872 pointers and allocated on the heap. */
6873 if (sym->attr.dummy || POINTER_TYPE_P (TREE_TYPE (tmp)))
6874 se->expr = tmp;
6875 else
6876 se->expr = gfc_build_addr_expr (NULL_TREE, tmp);
6877 if (size)
6878 array_parameter_size (tmp, expr, size);
6879 return;
6880 }
6881
6882 if (sym->attr.allocatable)
6883 {
6884 if (sym->attr.dummy || sym->attr.result)
6885 {
6886 gfc_conv_expr_descriptor (se, expr, ss);
6887 tmp = se->expr;
6888 }
6889 if (size)
6890 array_parameter_size (tmp, expr, size);
6891 se->expr = gfc_conv_array_data (tmp);
6892 return;
6893 }
6894 }
6895
6896 /* A convenient reduction in scope. */
6897 contiguous = g77 && !this_array_result && contiguous;
6898
6899 /* There is no need to pack and unpack the array if it is contiguous
6900 and not a deferred- or assumed-shape array, or if it is simply
6901 contiguous. */
6902 no_pack = ((sym && sym->as
6903 && !sym->attr.pointer
6904 && sym->as->type != AS_DEFERRED
6905 && sym->as->type != AS_ASSUMED_SHAPE)
6906 ||
6907 (ref && ref->u.ar.as
6908 && ref->u.ar.as->type != AS_DEFERRED
6909 && ref->u.ar.as->type != AS_ASSUMED_SHAPE)
6910 ||
6911 gfc_is_simply_contiguous (expr, false));
6912
6913 no_pack = contiguous && no_pack;
6914
6915 /* Array constructors are always contiguous and do not need packing. */
6916 array_constructor = g77 && !this_array_result && expr->expr_type == EXPR_ARRAY;
6917
6918 /* Same is true of contiguous sections from allocatable variables. */
6919 good_allocatable = contiguous
6920 && expr->symtree
6921 && expr->symtree->n.sym->attr.allocatable;
6922
6923 /* Or ultimate allocatable components. */
6924 ultimate_alloc_comp = contiguous && ultimate_alloc_comp;
6925
6926 if (no_pack || array_constructor || good_allocatable || ultimate_alloc_comp)
6927 {
6928 gfc_conv_expr_descriptor (se, expr, ss);
6929 if (expr->ts.type == BT_CHARACTER)
6930 se->string_length = expr->ts.u.cl->backend_decl;
6931 if (size)
6932 array_parameter_size (se->expr, expr, size);
6933 se->expr = gfc_conv_array_data (se->expr);
6934 return;
6935 }
6936
6937 if (this_array_result)
6938 {
6939 /* Result of the enclosing function. */
6940 gfc_conv_expr_descriptor (se, expr, ss);
6941 if (size)
6942 array_parameter_size (se->expr, expr, size);
6943 se->expr = gfc_build_addr_expr (NULL_TREE, se->expr);
6944
6945 if (g77 && TREE_TYPE (TREE_TYPE (se->expr)) != NULL_TREE
6946 && GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (TREE_TYPE (se->expr))))
6947 se->expr = gfc_conv_array_data (build_fold_indirect_ref_loc (input_location,
6948 se->expr));
6949
6950 return;
6951 }
6952 else
6953 {
6954 /* Every other type of array. */
6955 se->want_pointer = 1;
6956 gfc_conv_expr_descriptor (se, expr, ss);
6957 if (size)
6958 array_parameter_size (build_fold_indirect_ref_loc (input_location,
6959 se->expr),
6960 expr, size);
6961 }
6962
6963 /* Deallocate the allocatable components of structures that are
6964 not variables. */
6965 if ((expr->ts.type == BT_DERIVED || expr->ts.type == BT_CLASS)
6966 && expr->ts.u.derived->attr.alloc_comp
6967 && expr->expr_type != EXPR_VARIABLE)
6968 {
6969 tmp = build_fold_indirect_ref_loc (input_location, se->expr);
6970 tmp = gfc_deallocate_alloc_comp (expr->ts.u.derived, tmp, expr->rank);
6971
6972 /* The components shall be deallocated before their containing entity. */
6973 gfc_prepend_expr_to_block (&se->post, tmp);
6974 }
6975
6976 if (g77 || (fsym && fsym->attr.contiguous
6977 && !gfc_is_simply_contiguous (expr, false)))
6978 {
6979 tree origptr = NULL_TREE;
6980
6981 desc = se->expr;
6982
6983 /* For contiguous arrays, save the original value of the descriptor. */
6984 if (!g77)
6985 {
6986 origptr = gfc_create_var (pvoid_type_node, "origptr");
6987 tmp = build_fold_indirect_ref_loc (input_location, desc);
6988 tmp = gfc_conv_array_data (tmp);
6989 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
6990 TREE_TYPE (origptr), origptr,
6991 fold_convert (TREE_TYPE (origptr), tmp));
6992 gfc_add_expr_to_block (&se->pre, tmp);
6993 }
6994
6995 /* Repack the array. */
6996 if (gfc_option.warn_array_temp)
6997 {
6998 if (fsym)
6999 gfc_warning ("Creating array temporary at %L for argument '%s'",
7000 &expr->where, fsym->name);
7001 else
7002 gfc_warning ("Creating array temporary at %L", &expr->where);
7003 }
7004
7005 ptr = build_call_expr_loc (input_location,
7006 gfor_fndecl_in_pack, 1, desc);
7007
7008 if (fsym && fsym->attr.optional && sym && sym->attr.optional)
7009 {
7010 tmp = gfc_conv_expr_present (sym);
7011 ptr = build3_loc (input_location, COND_EXPR, TREE_TYPE (se->expr),
7012 tmp, fold_convert (TREE_TYPE (se->expr), ptr),
7013 fold_convert (TREE_TYPE (se->expr), null_pointer_node));
7014 }
7015
7016 ptr = gfc_evaluate_now (ptr, &se->pre);
7017
7018 /* Use the packed data for the actual argument, except for contiguous arrays,
7019 where the descriptor's data component is set. */
7020 if (g77)
7021 se->expr = ptr;
7022 else
7023 {
7024 tmp = build_fold_indirect_ref_loc (input_location, desc);
7025 gfc_conv_descriptor_data_set (&se->pre, tmp, ptr);
7026 }
7027
7028 if (gfc_option.rtcheck & GFC_RTCHECK_ARRAY_TEMPS)
7029 {
7030 char * msg;
7031
7032 if (fsym && proc_name)
7033 asprintf (&msg, "An array temporary was created for argument "
7034 "'%s' of procedure '%s'", fsym->name, proc_name);
7035 else
7036 asprintf (&msg, "An array temporary was created");
7037
7038 tmp = build_fold_indirect_ref_loc (input_location,
7039 desc);
7040 tmp = gfc_conv_array_data (tmp);
7041 tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
7042 fold_convert (TREE_TYPE (tmp), ptr), tmp);
7043
7044 if (fsym && fsym->attr.optional && sym && sym->attr.optional)
7045 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
7046 boolean_type_node,
7047 gfc_conv_expr_present (sym), tmp);
7048
7049 gfc_trans_runtime_check (false, true, tmp, &se->pre,
7050 &expr->where, msg);
7051 free (msg);
7052 }
7053
7054 gfc_start_block (&block);
7055
7056 /* Copy the data back. */
7057 if (fsym == NULL || fsym->attr.intent != INTENT_IN)
7058 {
7059 tmp = build_call_expr_loc (input_location,
7060 gfor_fndecl_in_unpack, 2, desc, ptr);
7061 gfc_add_expr_to_block (&block, tmp);
7062 }
7063
7064 /* Free the temporary. */
7065 tmp = gfc_call_free (convert (pvoid_type_node, ptr));
7066 gfc_add_expr_to_block (&block, tmp);
7067
7068 stmt = gfc_finish_block (&block);
7069
7070 gfc_init_block (&block);
7071 /* Only if it was repacked. This code needs to be executed before the
7072 loop cleanup code. */
7073 tmp = build_fold_indirect_ref_loc (input_location,
7074 desc);
7075 tmp = gfc_conv_array_data (tmp);
7076 tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
7077 fold_convert (TREE_TYPE (tmp), ptr), tmp);
7078
7079 if (fsym && fsym->attr.optional && sym && sym->attr.optional)
7080 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
7081 boolean_type_node,
7082 gfc_conv_expr_present (sym), tmp);
7083
7084 tmp = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt (input_location));
7085
7086 gfc_add_expr_to_block (&block, tmp);
7087 gfc_add_block_to_block (&block, &se->post);
7088
7089 gfc_init_block (&se->post);
7090
7091 /* Reset the descriptor pointer. */
7092 if (!g77)
7093 {
7094 tmp = build_fold_indirect_ref_loc (input_location, desc);
7095 gfc_conv_descriptor_data_set (&se->post, tmp, origptr);
7096 }
7097
7098 gfc_add_block_to_block (&se->post, &block);
7099 }
7100 }
7101
7102
7103 /* Generate code to deallocate an array, if it is allocated. */
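/* A minimal sketch of a situation handled here (assumed example):

       subroutine work (n)
         integer :: n
         real, allocatable :: a(:)
         allocate (a(n))
         ...
       end subroutine work   ! A is freed automatically on return

   The generated code frees the data only if it is still allocated (an
   already deallocated array is simply ignored) and then zeroes the data
   pointer so the array is subsequently seen as unallocated.  */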
7104
7105 tree
7106 gfc_trans_dealloc_allocated (tree descriptor, bool coarray)
7107 {
7108 tree tmp;
7109 tree var;
7110 stmtblock_t block;
7111
7112 gfc_start_block (&block);
7113
7114 var = gfc_conv_descriptor_data_get (descriptor);
7115 STRIP_NOPS (var);
7116
7117 /* Call array_deallocate with an int * present in the second argument.
7118 Although it is ignored here, its presence ensures that arrays that
7119 are already deallocated are ignored. */
7120 tmp = gfc_deallocate_with_status (coarray ? descriptor : var, NULL_TREE,
7121 NULL_TREE, NULL_TREE, NULL_TREE, true,
7122 NULL, coarray);
7123 gfc_add_expr_to_block (&block, tmp);
7124
7125 /* Zero the data pointer. */
7126 tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
7127 var, build_int_cst (TREE_TYPE (var), 0));
7128 gfc_add_expr_to_block (&block, tmp);
7129
7130 return gfc_finish_block (&block);
7131 }
7132
7133
7134 /* This helper function calculates the number of elements in a full array. */
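/* With lbound, ubound and stride taken from the last dimension, the value
   computed is

       nelems = (ubound - lbound + 1) * stride

   and since the stride of dimension N is the product of the extents of
   dimensions 1 .. N-1 (column-major order), this is the total number of
   elements in the array.  */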
7135
7136 static tree
7137 get_full_array_size (stmtblock_t *block, tree decl, int rank)
7138 {
7139 tree idx;
7140 tree nelems;
7141 tree tmp;
7142 idx = gfc_rank_cst[rank - 1];
7143 nelems = gfc_conv_descriptor_ubound_get (decl, idx);
7144 tmp = gfc_conv_descriptor_lbound_get (decl, idx);
7145 tmp = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
7146 nelems, tmp);
7147 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
7148 tmp, gfc_index_one_node);
7149 tmp = gfc_evaluate_now (tmp, block);
7150
7151 nelems = gfc_conv_descriptor_stride_get (decl, idx);
7152 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
7153 nelems, tmp);
7154 return gfc_evaluate_now (tmp, block);
7155 }
7156
7157
7158 /* Allocate dest to the same size as src, and copy src -> dest.
7159 If no_malloc is set, only the copy is done. */
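/* An assumed example of where this is needed: intrinsic assignment of a
   derived type with an allocatable component performs a deep copy,

       type t
         real, allocatable :: r(:)
       end type t
       type(t) :: x, y
       allocate (x%r(5))
       x%r = 1.0
       y = x          ! Y%R must get its own copy of X%R's data

   If X%R is allocated, storage for Y%R is malloc'd to the same size and
   the contents are memcpy'd; otherwise Y%R's data pointer is nullified.  */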
7160
7161 static tree
7162 duplicate_allocatable (tree dest, tree src, tree type, int rank,
7163 bool no_malloc)
7164 {
7165 tree tmp;
7166 tree size;
7167 tree nelems;
7168 tree null_cond;
7169 tree null_data;
7170 stmtblock_t block;
7171
7172 /* If the source is null, set the destination to null. Otherwise,
7173 allocate memory for the destination and copy the data. */
7174 gfc_init_block (&block);
7175
7176 if (rank == 0)
7177 {
7178 tmp = null_pointer_node;
7179 tmp = fold_build2_loc (input_location, MODIFY_EXPR, type, dest, tmp);
7180 gfc_add_expr_to_block (&block, tmp);
7181 null_data = gfc_finish_block (&block);
7182
7183 gfc_init_block (&block);
7184 size = TYPE_SIZE_UNIT (TREE_TYPE (type));
7185 if (!no_malloc)
7186 {
7187 tmp = gfc_call_malloc (&block, type, size);
7188 tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
7189 dest, fold_convert (type, tmp));
7190 gfc_add_expr_to_block (&block, tmp);
7191 }
7192
7193 tmp = builtin_decl_explicit (BUILT_IN_MEMCPY);
7194 tmp = build_call_expr_loc (input_location, tmp, 3,
7195 dest, src, size);
7196 }
7197 else
7198 {
7199 gfc_conv_descriptor_data_set (&block, dest, null_pointer_node);
7200 null_data = gfc_finish_block (&block);
7201
7202 gfc_init_block (&block);
7203 nelems = get_full_array_size (&block, src, rank);
7204 tmp = fold_convert (gfc_array_index_type,
7205 TYPE_SIZE_UNIT (gfc_get_element_type (type)));
7206 size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
7207 nelems, tmp);
7208 if (!no_malloc)
7209 {
7210 tmp = TREE_TYPE (gfc_conv_descriptor_data_get (src));
7211 tmp = gfc_call_malloc (&block, tmp, size);
7212 gfc_conv_descriptor_data_set (&block, dest, tmp);
7213 }
7214
7215 /* We know the temporary and the value will be the same length,
7216 so we can use memcpy. */
7217 tmp = builtin_decl_explicit (BUILT_IN_MEMCPY);
7218 tmp = build_call_expr_loc (input_location,
7219 tmp, 3, gfc_conv_descriptor_data_get (dest),
7220 gfc_conv_descriptor_data_get (src), size);
7221 }
7222
7223 gfc_add_expr_to_block (&block, tmp);
7224 tmp = gfc_finish_block (&block);
7225
7226 /* Null the destination if the source is null; otherwise do
7227 the allocate and copy. */
7228 if (rank == 0)
7229 null_cond = src;
7230 else
7231 null_cond = gfc_conv_descriptor_data_get (src);
7232
7233 null_cond = convert (pvoid_type_node, null_cond);
7234 null_cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
7235 null_cond, null_pointer_node);
7236 return build3_v (COND_EXPR, null_cond, tmp, null_data);
7237 }
7238
7239
7240 /* Allocate dest to the same size as src, and copy data src -> dest. */
7241
7242 tree
7243 gfc_duplicate_allocatable (tree dest, tree src, tree type, int rank)
7244 {
7245 return duplicate_allocatable (dest, src, type, rank, false);
7246 }
7247
7248
7249 /* Copy data src -> dest. */
7250
7251 tree
7252 gfc_copy_allocatable_data (tree dest, tree src, tree type, int rank)
7253 {
7254 return duplicate_allocatable (dest, src, type, rank, true);
7255 }
7256
7257
7258 /* Recursively traverse an object of derived type, generating code to
7259 deallocate, nullify or copy allocatable components. This is the workhorse
7260 function for the functions named in this enum; see the illustration below. */
7261
7262 enum {DEALLOCATE_ALLOC_COMP = 1, NULLIFY_ALLOC_COMP, COPY_ALLOC_COMP,
7263 COPY_ONLY_ALLOC_COMP};
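/* For instance (assumed example), given

       type person
         integer, allocatable :: ids(:)
         real, allocatable    :: scores(:)
       end type person
       type(person) :: a, b

   NULLIFY_ALLOC_COMP is used when A comes into existence,
   DEALLOCATE_ALLOC_COMP when A is deallocated or goes out of scope, and
   COPY_ALLOC_COMP when B = A has to deep-copy IDS and SCORES.  */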
7264
7265 static tree
7266 structure_alloc_comps (gfc_symbol * der_type, tree decl,
7267 tree dest, int rank, int purpose)
7268 {
7269 gfc_component *c;
7270 gfc_loopinfo loop;
7271 stmtblock_t fnblock;
7272 stmtblock_t loopbody;
7273 stmtblock_t tmpblock;
7274 tree decl_type;
7275 tree tmp;
7276 tree comp;
7277 tree dcmp;
7278 tree nelems;
7279 tree index;
7280 tree var;
7281 tree cdecl;
7282 tree ctype;
7283 tree vref, dref;
7284 tree null_cond = NULL_TREE;
7285 bool called_dealloc_with_status;
7286
7287 gfc_init_block (&fnblock);
7288
7289 decl_type = TREE_TYPE (decl);
7290
7291 if ((POINTER_TYPE_P (decl_type) && rank != 0)
7292 || (TREE_CODE (decl_type) == REFERENCE_TYPE && rank == 0))
7293
7294 decl = build_fold_indirect_ref_loc (input_location,
7295 decl);
7296
7297 /* Just in case it gets dereferenced. */
7298 decl_type = TREE_TYPE (decl);
7299
7300 /* If this is an array of derived types with allocatable components,
7301 build a loop and recursively call this function. */
7302 if (TREE_CODE (decl_type) == ARRAY_TYPE
7303 || GFC_DESCRIPTOR_TYPE_P (decl_type))
7304 {
7305 tmp = gfc_conv_array_data (decl);
7306 var = build_fold_indirect_ref_loc (input_location,
7307 tmp);
7308
7309 /* Get the number of elements - 1 and set the counter. */
7310 if (GFC_DESCRIPTOR_TYPE_P (decl_type))
7311 {
7312 /* Use the descriptor for an allocatable array. Since this
7313 is a full array reference, we only need the descriptor
7314 information from dimension = rank. */
7315 tmp = get_full_array_size (&fnblock, decl, rank);
7316 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7317 gfc_array_index_type, tmp,
7318 gfc_index_one_node);
7319
7320 null_cond = gfc_conv_descriptor_data_get (decl);
7321 null_cond = fold_build2_loc (input_location, NE_EXPR,
7322 boolean_type_node, null_cond,
7323 build_int_cst (TREE_TYPE (null_cond), 0));
7324 }
7325 else
7326 {
7327 /* Otherwise use the TYPE_DOMAIN information. */
7328 tmp = array_type_nelts (decl_type);
7329 tmp = fold_convert (gfc_array_index_type, tmp);
7330 }
7331
7332 /* Remember that this is, in fact, the no. of elements - 1. */
7333 nelems = gfc_evaluate_now (tmp, &fnblock);
7334 index = gfc_create_var (gfc_array_index_type, "S");
7335
7336 /* Build the body of the loop. */
7337 gfc_init_block (&loopbody);
7338
7339 vref = gfc_build_array_ref (var, index, NULL);
7340
7341 if (purpose == COPY_ALLOC_COMP)
7342 {
7343 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (dest)))
7344 {
7345 tmp = gfc_duplicate_allocatable (dest, decl, decl_type, rank);
7346 gfc_add_expr_to_block (&fnblock, tmp);
7347 }
7348 tmp = build_fold_indirect_ref_loc (input_location,
7349 gfc_conv_array_data (dest));
7350 dref = gfc_build_array_ref (tmp, index, NULL);
7351 tmp = structure_alloc_comps (der_type, vref, dref, rank, purpose);
7352 }
7353 else if (purpose == COPY_ONLY_ALLOC_COMP)
7354 {
7355 tmp = build_fold_indirect_ref_loc (input_location,
7356 gfc_conv_array_data (dest));
7357 dref = gfc_build_array_ref (tmp, index, NULL);
7358 tmp = structure_alloc_comps (der_type, vref, dref, rank,
7359 COPY_ALLOC_COMP);
7360 }
7361 else
7362 tmp = structure_alloc_comps (der_type, vref, NULL_TREE, rank, purpose);
7363
7364 gfc_add_expr_to_block (&loopbody, tmp);
7365
7366 /* Build the loop and return. */
7367 gfc_init_loopinfo (&loop);
7368 loop.dimen = 1;
7369 loop.from[0] = gfc_index_zero_node;
7370 loop.loopvar[0] = index;
7371 loop.to[0] = nelems;
7372 gfc_trans_scalarizing_loops (&loop, &loopbody);
7373 gfc_add_block_to_block (&fnblock, &loop.pre);
7374
7375 tmp = gfc_finish_block (&fnblock);
7376 if (null_cond != NULL_TREE)
7377 tmp = build3_v (COND_EXPR, null_cond, tmp,
7378 build_empty_stmt (input_location));
7379
7380 return tmp;
7381 }
7382
7383 /* Otherwise, act on the components or recursively call self to
7384 act on a chain of components. */
7385 for (c = der_type->components; c; c = c->next)
7386 {
7387 bool cmp_has_alloc_comps = (c->ts.type == BT_DERIVED
7388 || c->ts.type == BT_CLASS)
7389 && c->ts.u.derived->attr.alloc_comp;
7390 cdecl = c->backend_decl;
7391 ctype = TREE_TYPE (cdecl);
7392
7393 switch (purpose)
7394 {
7395 case DEALLOCATE_ALLOC_COMP:
7396
7397 /* gfc_deallocate_scalar_with_status calls gfc_deallocate_alloc_comp
7398 (i.e. this function) so generate all the calls and suppress the
7399 recursion from here, if necessary. */
7400 called_dealloc_with_status = false;
7401 gfc_init_block (&tmpblock);
7402
7403 if (c->attr.allocatable
7404 && (c->attr.dimension || c->attr.codimension))
7405 {
7406 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7407 decl, cdecl, NULL_TREE);
7408 tmp = gfc_trans_dealloc_allocated (comp, c->attr.codimension);
7409 gfc_add_expr_to_block (&tmpblock, tmp);
7410 }
7411 else if (c->attr.allocatable)
7412 {
7413 /* Allocatable scalar components. */
7414 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7415 decl, cdecl, NULL_TREE);
7416
7417 tmp = gfc_deallocate_scalar_with_status (comp, NULL, true, NULL,
7418 c->ts);
7419 gfc_add_expr_to_block (&tmpblock, tmp);
7420 called_dealloc_with_status = true;
7421
7422 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
7423 void_type_node, comp,
7424 build_int_cst (TREE_TYPE (comp), 0));
7425 gfc_add_expr_to_block (&tmpblock, tmp);
7426 }
7427 else if (c->ts.type == BT_CLASS && CLASS_DATA (c)->attr.allocatable)
7428 {
7429 /* Allocatable CLASS components. */
7430 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7431 decl, cdecl, NULL_TREE);
7432
7433 /* Add reference to '_data' component. */
7434 tmp = CLASS_DATA (c)->backend_decl;
7435 comp = fold_build3_loc (input_location, COMPONENT_REF,
7436 TREE_TYPE (tmp), comp, tmp, NULL_TREE);
7437
7438 if (GFC_DESCRIPTOR_TYPE_P(TREE_TYPE (comp)))
7439 tmp = gfc_trans_dealloc_allocated (comp,
7440 CLASS_DATA (c)->attr.codimension);
7441 else
7442 {
7443 tmp = gfc_deallocate_scalar_with_status (comp, NULL, true, NULL,
7444 CLASS_DATA (c)->ts);
7445 gfc_add_expr_to_block (&tmpblock, tmp);
7446 called_dealloc_with_status = true;
7447
7448 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
7449 void_type_node, comp,
7450 build_int_cst (TREE_TYPE (comp), 0));
7451 }
7452 gfc_add_expr_to_block (&tmpblock, tmp);
7453 }
7454
7455 if (cmp_has_alloc_comps
7456 && !c->attr.pointer
7457 && !called_dealloc_with_status)
7458 {
7459 /* Do not deallocate the components of ultimate pointer
7460 components, nor recursively call self if a call has already been
7461 made to gfc_trans_dealloc_allocated. */
7462 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7463 decl, cdecl, NULL_TREE);
7464 rank = c->as ? c->as->rank : 0;
7465 tmp = structure_alloc_comps (c->ts.u.derived, comp, NULL_TREE,
7466 rank, purpose);
7467 gfc_add_expr_to_block (&fnblock, tmp);
7468 }
7469
7470 /* Now add the deallocation of this component. */
7471 gfc_add_block_to_block (&fnblock, &tmpblock);
7472 break;
7473
7474 case NULLIFY_ALLOC_COMP:
7475 if (c->attr.pointer)
7476 continue;
7477 else if (c->attr.allocatable
7478 && (c->attr.dimension || c->attr.codimension))
7479 {
7480 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7481 decl, cdecl, NULL_TREE);
7482 gfc_conv_descriptor_data_set (&fnblock, comp, null_pointer_node);
7483 }
7484 else if (c->attr.allocatable)
7485 {
7486 /* Allocatable scalar components. */
7487 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7488 decl, cdecl, NULL_TREE);
7489 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
7490 void_type_node, comp,
7491 build_int_cst (TREE_TYPE (comp), 0));
7492 gfc_add_expr_to_block (&fnblock, tmp);
7493 }
7494 else if (c->ts.type == BT_CLASS && CLASS_DATA (c)->attr.allocatable)
7495 {
7496 /* Allocatable CLASS components. */
7497 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7498 decl, cdecl, NULL_TREE);
7499 /* Add reference to '_data' component. */
7500 tmp = CLASS_DATA (c)->backend_decl;
7501 comp = fold_build3_loc (input_location, COMPONENT_REF,
7502 TREE_TYPE (tmp), comp, tmp, NULL_TREE);
7503 if (GFC_DESCRIPTOR_TYPE_P(TREE_TYPE (comp)))
7504 gfc_conv_descriptor_data_set (&fnblock, comp, null_pointer_node);
7505 else
7506 {
7507 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
7508 void_type_node, comp,
7509 build_int_cst (TREE_TYPE (comp), 0));
7510 gfc_add_expr_to_block (&fnblock, tmp);
7511 }
7512 }
7513 else if (cmp_has_alloc_comps)
7514 {
7515 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7516 decl, cdecl, NULL_TREE);
7517 rank = c->as ? c->as->rank : 0;
7518 tmp = structure_alloc_comps (c->ts.u.derived, comp, NULL_TREE,
7519 rank, purpose);
7520 gfc_add_expr_to_block (&fnblock, tmp);
7521 }
7522 break;
7523
7524 case COPY_ALLOC_COMP:
7525 if (c->attr.pointer)
7526 continue;
7527
7528 /* We need source and destination components. */
7529 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype, decl,
7530 cdecl, NULL_TREE);
7531 dcmp = fold_build3_loc (input_location, COMPONENT_REF, ctype, dest,
7532 cdecl, NULL_TREE);
7533 dcmp = fold_convert (TREE_TYPE (comp), dcmp);
7534
7535 if (c->attr.allocatable && !cmp_has_alloc_comps)
7536 {
7537 rank = c->as ? c->as->rank : 0;
7538 tmp = gfc_duplicate_allocatable (dcmp, comp, ctype, rank);
7539 gfc_add_expr_to_block (&fnblock, tmp);
7540 }
7541
7542 if (cmp_has_alloc_comps)
7543 {
7544 rank = c->as ? c->as->rank : 0;
7545 tmp = fold_convert (TREE_TYPE (dcmp), comp);
7546 gfc_add_modify (&fnblock, dcmp, tmp);
7547 tmp = structure_alloc_comps (c->ts.u.derived, comp, dcmp,
7548 rank, purpose);
7549 gfc_add_expr_to_block (&fnblock, tmp);
7550 }
7551 break;
7552
7553 default:
7554 gcc_unreachable ();
7555 break;
7556 }
7557 }
7558
7559 return gfc_finish_block (&fnblock);
7560 }
7561
7562 /* Recursively traverse an object of derived type, generating code to
7563 nullify allocatable components. */
7564
7565 tree
7566 gfc_nullify_alloc_comp (gfc_symbol * der_type, tree decl, int rank)
7567 {
7568 return structure_alloc_comps (der_type, decl, NULL_TREE, rank,
7569 NULLIFY_ALLOC_COMP);
7570 }
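
/* A minimal Fortran sketch (illustrative only; the type and variable names
   are invented) of a declaration for which the nullification generated by
   gfc_nullify_alloc_comp is needed:

       subroutine demo
         type :: t
           integer, allocatable :: a(:)
         end type t
         type(t) :: x              ! local, not SAVEd, no default initializer
         ! x%a must start out nullified so that ALLOCATED (x%a) is false
         ! and the end-of-scope cleanup does not free a junk pointer.
       end subroutine demo  */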
7571
7572
7573 /* Recursively traverse an object of derived type, generating code to
7574 deallocate allocatable components. */
7575
7576 tree
7577 gfc_deallocate_alloc_comp (gfc_symbol * der_type, tree decl, int rank)
7578 {
7579 return structure_alloc_comps (der_type, decl, NULL_TREE, rank,
7580 DEALLOCATE_ALLOC_COMP);
7581 }
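
/* An illustrative Fortran sketch (invented names) of the kind of cleanup
   gfc_deallocate_alloc_comp generates; freeing an object of such a type
   must also free each allocated component:

       type(t), allocatable :: y(:)   ! t as in the sketch above
       allocate (y(2))
       allocate (y(1)%a(10))
       deallocate (y)                 ! y(1)%a is deallocated as well  */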
7582
7583
7584 /* Recursively traverse an object of derived type, generating code to
7585 copy it and its allocatable components. */
7586
7587 tree
7588 gfc_copy_alloc_comp (gfc_symbol * der_type, tree decl, tree dest, int rank)
7589 {
7590 return structure_alloc_comps (der_type, decl, dest, rank, COPY_ALLOC_COMP);
7591 }
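
/* An illustrative Fortran sketch (invented names) of the deep copy that
   gfc_copy_alloc_comp generates for intrinsic assignment of derived types
   with allocatable components:

       type(t) :: lhs, rhs            ! t has INTEGER, ALLOCATABLE :: a(:)
       allocate (rhs%a(5))
       rhs%a = 1
       lhs = rhs                      ! lhs%a gets its own allocation and a
                                      ! copy of the values; it must not
                                      ! alias rhs%a  */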
7592
7593
7594 /* Recursively traverse an object of derived type, generating code to
7595 copy only its allocatable components. */
7596
7597 tree
7598 gfc_copy_only_alloc_comp (gfc_symbol * der_type, tree decl, tree dest, int rank)
7599 {
7600 return structure_alloc_comps (der_type, decl, dest, rank, COPY_ONLY_ALLOC_COMP);
7601 }
7602
7603
7604 /* Returns the value of LBOUND for an expression. This could be broken out
7605 from gfc_conv_intrinsic_bound, but doing it here is simpler. This is
7606 called by gfc_alloc_allocatable_for_assignment. */
7607 static tree
7608 get_std_lbound (gfc_expr *expr, tree desc, int dim, bool assumed_size)
7609 {
7610 tree lbound;
7611 tree ubound;
7612 tree stride;
7613 tree cond, cond1, cond3, cond4;
7614 tree tmp;
7615 gfc_ref *ref;
7616
7617 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)))
7618 {
7619 tmp = gfc_rank_cst[dim];
7620 lbound = gfc_conv_descriptor_lbound_get (desc, tmp);
7621 ubound = gfc_conv_descriptor_ubound_get (desc, tmp);
7622 stride = gfc_conv_descriptor_stride_get (desc, tmp);
7623 cond1 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
7624 ubound, lbound);
7625 cond3 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
7626 stride, gfc_index_zero_node);
7627 cond3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
7628 boolean_type_node, cond3, cond1);
7629 cond4 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
7630 stride, gfc_index_zero_node);
7631 if (assumed_size)
7632 cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
7633 tmp, build_int_cst (gfc_array_index_type,
7634 expr->rank - 1));
7635 else
7636 cond = boolean_false_node;
7637
7638 cond1 = fold_build2_loc (input_location, TRUTH_OR_EXPR,
7639 boolean_type_node, cond3, cond4);
7640 cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
7641 boolean_type_node, cond, cond1);
7642
7643 return fold_build3_loc (input_location, COND_EXPR,
7644 gfc_array_index_type, cond,
7645 lbound, gfc_index_one_node);
7646 }
7647
7648 if (expr->expr_type == EXPR_FUNCTION)
7649 {
7650 /* A conversion function, so use the argument. */
7651 gcc_assert (expr->value.function.isym
7652 && expr->value.function.isym->conversion);
7653 expr = expr->value.function.actual->expr;
7654 }
7655
7656 if (expr->expr_type == EXPR_VARIABLE)
7657 {
7658 tmp = TREE_TYPE (expr->symtree->n.sym->backend_decl);
7659 for (ref = expr->ref; ref; ref = ref->next)
7660 {
7661 if (ref->type == REF_COMPONENT
7662 && ref->u.c.component->as
7663 && ref->next
7664 && ref->next->u.ar.type == AR_FULL)
7665 tmp = TREE_TYPE (ref->u.c.component->backend_decl);
7666 }
7667 return GFC_TYPE_ARRAY_LBOUND(tmp, dim);
7668 }
7669
7670 return gfc_index_one_node;
7671 }
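
/* An illustrative Fortran sketch (invented names) of the lower-bound rule
   get_std_lbound implements for (re)allocation on assignment; assume A is
   unallocated before each assignment:

       integer, allocatable :: a(:)
       integer :: b(2:5)
       a = b          ! a is allocated with bounds (2:5) - LBOUND of a whole
                      ! array variable is its declared lower bound
       a = b + 1      ! the rhs is an expression, so its LBOUND is 1 and a
                      ! is allocated with bounds (1:4)  */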
7672
7673
7674 /* Returns true if an expression represents an lhs that can be reallocated
7675 on assignment. */
7676
7677 bool
7678 gfc_is_reallocatable_lhs (gfc_expr *expr)
7679 {
7680 gfc_ref * ref;
7681
7682 if (!expr->ref)
7683 return false;
7684
7685 /* An allocatable variable. */
7686 if (expr->symtree->n.sym->attr.allocatable
7687 && expr->ref
7688 && expr->ref->type == REF_ARRAY
7689 && expr->ref->u.ar.type == AR_FULL)
7690 return true;
7691
7692 /* All that can be left are allocatable components. */
7693 if ((expr->symtree->n.sym->ts.type != BT_DERIVED
7694 && expr->symtree->n.sym->ts.type != BT_CLASS)
7695 || !expr->symtree->n.sym->ts.u.derived->attr.alloc_comp)
7696 return false;
7697
7698 /* Find a component ref followed by an array reference. */
7699 for (ref = expr->ref; ref; ref = ref->next)
7700 if (ref->next
7701 && ref->type == REF_COMPONENT
7702 && ref->next->type == REF_ARRAY
7703 && !ref->next->next)
7704 break;
7705
7706 if (!ref)
7707 return false;
7708
7709 /* Return true if valid reallocatable lhs. */
7710 if (ref->u.c.component->attr.allocatable
7711 && ref->next->u.ar.type == AR_FULL)
7712 return true;
7713
7714 return false;
7715 }
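
/* An illustrative Fortran sketch (invented names) of which left-hand sides
   this predicate accepts:

       integer, allocatable :: a(:)
       type(t) :: d                   ! t has INTEGER, ALLOCATABLE :: a(:)
       a = [1, 2, 3]                  ! true:  full reference to an
                                      !        allocatable variable
       d%a = [1, 2, 3]                ! true:  full reference to an
                                      !        allocatable component
       a(1:2) = 0                     ! false: array section - the shape of
                                      !        a must not change  */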
7716
7717
7718 /* Allocate the lhs of an assignment to an allocatable array, otherwise
7719 reallocate it. */
7720
7721 tree
7722 gfc_alloc_allocatable_for_assignment (gfc_loopinfo *loop,
7723 gfc_expr *expr1,
7724 gfc_expr *expr2)
7725 {
7726 stmtblock_t realloc_block;
7727 stmtblock_t alloc_block;
7728 stmtblock_t fblock;
7729 gfc_ss *rss;
7730 gfc_ss *lss;
7731 gfc_array_info *linfo;
7732 tree realloc_expr;
7733 tree alloc_expr;
7734 tree size1;
7735 tree size2;
7736 tree array1;
7737 tree cond;
7738 tree tmp;
7739 tree tmp2;
7740 tree lbound;
7741 tree ubound;
7742 tree desc;
7743 tree desc2;
7744 tree offset;
7745 tree jump_label1;
7746 tree jump_label2;
7747 tree neq_size;
7748 tree lbd;
7749 int n;
7750 int dim;
7751 gfc_array_spec * as;
7752
7753 /* x = f(...) with x allocatable. In this case, expr1 is the rhs.
7754 Find the lhs expression in the loop chain and set expr1 and
7755 expr2 accordingly. */
7756 if (expr1->expr_type == EXPR_FUNCTION && expr2 == NULL)
7757 {
7758 expr2 = expr1;
7759 /* Find the ss for the lhs. */
7760 lss = loop->ss;
7761 for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
7762 if (lss->info->expr && lss->info->expr->expr_type == EXPR_VARIABLE)
7763 break;
7764 if (lss == gfc_ss_terminator)
7765 return NULL_TREE;
7766 expr1 = lss->info->expr;
7767 }
7768
7769 /* Bail out if this is not a valid allocate on assignment. */
7770 if (!gfc_is_reallocatable_lhs (expr1)
7771 || (expr2 && !expr2->rank))
7772 return NULL_TREE;
7773
7774 /* Find the ss for the lhs. */
7775 lss = loop->ss;
7776 for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
7777 if (lss->info->expr == expr1)
7778 break;
7779
7780 if (lss == gfc_ss_terminator)
7781 return NULL_TREE;
7782
7783 linfo = &lss->info->data.array;
7784
7785 /* Find an ss for the rhs. For operator expressions, we see the
7786 ss's for the operands. Any one of these will do. */
7787 rss = loop->ss;
7788 for (; rss && rss != gfc_ss_terminator; rss = rss->loop_chain)
7789 if (rss->info->expr != expr1 && rss != loop->temp_ss)
7790 break;
7791
7792 if (expr2 && rss == gfc_ss_terminator)
7793 return NULL_TREE;
7794
7795 gfc_start_block (&fblock);
7796
7797 /* Since the lhs is allocatable, this must be a descriptor type.
7798 Get the data and array size. */
7799 desc = linfo->descriptor;
7800 gcc_assert (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)));
7801 array1 = gfc_conv_descriptor_data_get (desc);
7802
7803 /* 7.4.1.3 "If variable is an allocated allocatable variable, it is
7804 deallocated if expr is an array of different shape or any of the
7805 corresponding length type parameter values of variable and expr
7806 differ." This assures F95 compatibility. */
7807 jump_label1 = gfc_build_label_decl (NULL_TREE);
7808 jump_label2 = gfc_build_label_decl (NULL_TREE);
7809
7810 /* Allocate if data is NULL. */
7811 cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
7812 array1, build_int_cst (TREE_TYPE (array1), 0));
7813 tmp = build3_v (COND_EXPR, cond,
7814 build1_v (GOTO_EXPR, jump_label1),
7815 build_empty_stmt (input_location));
7816 gfc_add_expr_to_block (&fblock, tmp);
7817
7818 /* Get arrayspec if expr is a full array. */
7819 if (expr2 && expr2->expr_type == EXPR_FUNCTION
7820 && expr2->value.function.isym
7821 && expr2->value.function.isym->conversion)
7822 {
7823 /* For conversion functions, take the arg. */
7824 gfc_expr *arg = expr2->value.function.actual->expr;
7825 as = gfc_get_full_arrayspec_from_expr (arg);
7826 }
7827 else if (expr2)
7828 as = gfc_get_full_arrayspec_from_expr (expr2);
7829 else
7830 as = NULL;
7831
7832 /* If the lhs shape is not the same as the rhs's, jump to setting the
7833 bounds and doing the reallocation... */
7834 for (n = 0; n < expr1->rank; n++)
7835 {
7836 /* Check the shape. */
7837 lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
7838 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[n]);
7839 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7840 gfc_array_index_type,
7841 loop->to[n], loop->from[n]);
7842 tmp = fold_build2_loc (input_location, PLUS_EXPR,
7843 gfc_array_index_type,
7844 tmp, lbound);
7845 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7846 gfc_array_index_type,
7847 tmp, ubound);
7848 cond = fold_build2_loc (input_location, NE_EXPR,
7849 boolean_type_node,
7850 tmp, gfc_index_zero_node);
7851 tmp = build3_v (COND_EXPR, cond,
7852 build1_v (GOTO_EXPR, jump_label1),
7853 build_empty_stmt (input_location));
7854 gfc_add_expr_to_block (&fblock, tmp);
7855 }
7856
7857 /* ...else jump past the (re)alloc code. */
7858 tmp = build1_v (GOTO_EXPR, jump_label2);
7859 gfc_add_expr_to_block (&fblock, tmp);
7860
7861 /* Add the label to start automatic (re)allocation. */
7862 tmp = build1_v (LABEL_EXPR, jump_label1);
7863 gfc_add_expr_to_block (&fblock, tmp);
7864
7865 size1 = gfc_conv_descriptor_size (desc, expr1->rank);
7866
7867 /* Get the rhs size. Fix both sizes. */
7868 if (expr2)
7869 desc2 = rss->info->data.array.descriptor;
7870 else
7871 desc2 = NULL_TREE;
7872 size2 = gfc_index_one_node;
7873 for (n = 0; n < expr2->rank; n++)
7874 {
7875 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7876 gfc_array_index_type,
7877 loop->to[n], loop->from[n]);
7878 tmp = fold_build2_loc (input_location, PLUS_EXPR,
7879 gfc_array_index_type,
7880 tmp, gfc_index_one_node);
7881 size2 = fold_build2_loc (input_location, MULT_EXPR,
7882 gfc_array_index_type,
7883 tmp, size2);
7884 }
7885
7886 size1 = gfc_evaluate_now (size1, &fblock);
7887 size2 = gfc_evaluate_now (size2, &fblock);
7888
7889 cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
7890 size1, size2);
7891 neq_size = gfc_evaluate_now (cond, &fblock);
7892
7893
7894 /* Now modify the lhs descriptor and the associated scalarizer
7895 variables. F2003 7.4.1.3: "If variable is or becomes an
7896 unallocated allocatable variable, then it is allocated with each
7897 deferred type parameter equal to the corresponding type parameters
7898 of expr, with the shape of expr, and with each lower bound equal
7899 to the corresponding element of LBOUND(expr)."
7900 Reuse size1 to keep a dimension-by-dimension track of the
7901 stride of the new array. */
7902 size1 = gfc_index_one_node;
7903 offset = gfc_index_zero_node;
7904
7905 for (n = 0; n < expr2->rank; n++)
7906 {
7907 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7908 gfc_array_index_type,
7909 loop->to[n], loop->from[n]);
7910 tmp = fold_build2_loc (input_location, PLUS_EXPR,
7911 gfc_array_index_type,
7912 tmp, gfc_index_one_node);
7913
7914 lbound = gfc_index_one_node;
7915 ubound = tmp;
7916
7917 if (as)
7918 {
7919 lbd = get_std_lbound (expr2, desc2, n,
7920 as->type == AS_ASSUMED_SIZE);
7921 ubound = fold_build2_loc (input_location,
7922 MINUS_EXPR,
7923 gfc_array_index_type,
7924 ubound, lbound);
7925 ubound = fold_build2_loc (input_location,
7926 PLUS_EXPR,
7927 gfc_array_index_type,
7928 ubound, lbd);
7929 lbound = lbd;
7930 }
7931
7932 gfc_conv_descriptor_lbound_set (&fblock, desc,
7933 gfc_rank_cst[n],
7934 lbound);
7935 gfc_conv_descriptor_ubound_set (&fblock, desc,
7936 gfc_rank_cst[n],
7937 ubound);
7938 gfc_conv_descriptor_stride_set (&fblock, desc,
7939 gfc_rank_cst[n],
7940 size1);
7941 lbound = gfc_conv_descriptor_lbound_get (desc,
7942 gfc_rank_cst[n]);
7943 tmp2 = fold_build2_loc (input_location, MULT_EXPR,
7944 gfc_array_index_type,
7945 lbound, size1);
7946 offset = fold_build2_loc (input_location, MINUS_EXPR,
7947 gfc_array_index_type,
7948 offset, tmp2);
7949 size1 = fold_build2_loc (input_location, MULT_EXPR,
7950 gfc_array_index_type,
7951 tmp, size1);
7952 }
7953
7954 /* Set the lhs descriptor and scalarizer offsets. For rank > 1,
7955 the array offset is saved and the info.offset is used for a
7956 running offset. Use the saved_offset instead. */
7957 tmp = gfc_conv_descriptor_offset (desc);
7958 gfc_add_modify (&fblock, tmp, offset);
7959 if (linfo->saved_offset
7960 && TREE_CODE (linfo->saved_offset) == VAR_DECL)
7961 gfc_add_modify (&fblock, linfo->saved_offset, tmp);
7962
7963 /* Now set the deltas for the lhs. */
7964 for (n = 0; n < expr1->rank; n++)
7965 {
7966 tmp = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
7967 dim = lss->dim[n];
7968 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7969 gfc_array_index_type, tmp,
7970 loop->from[dim]);
7971 if (linfo->delta[dim]
7972 && TREE_CODE (linfo->delta[dim]) == VAR_DECL)
7973 gfc_add_modify (&fblock, linfo->delta[dim], tmp);
7974 }
7975
7976 /* Get the new lhs size in bytes. */
7977 if (expr1->ts.type == BT_CHARACTER && expr1->ts.deferred)
7978 {
7979 tmp = expr2->ts.u.cl->backend_decl;
7980 gcc_assert (expr1->ts.u.cl->backend_decl);
7981 tmp = fold_convert (TREE_TYPE (expr1->ts.u.cl->backend_decl), tmp);
7982 gfc_add_modify (&fblock, expr1->ts.u.cl->backend_decl, tmp);
7983 }
7984 else if (expr1->ts.type == BT_CHARACTER && expr1->ts.u.cl->backend_decl)
7985 {
7986 tmp = TYPE_SIZE_UNIT (TREE_TYPE (gfc_typenode_for_spec (&expr1->ts)));
7987 tmp = fold_build2_loc (input_location, MULT_EXPR,
7988 gfc_array_index_type, tmp,
7989 expr1->ts.u.cl->backend_decl);
7990 }
7991 else
7992 tmp = TYPE_SIZE_UNIT (gfc_typenode_for_spec (&expr1->ts));
7993 tmp = fold_convert (gfc_array_index_type, tmp);
7994 size2 = fold_build2_loc (input_location, MULT_EXPR,
7995 gfc_array_index_type,
7996 tmp, size2);
7997 size2 = fold_convert (size_type_node, size2);
7998 size2 = gfc_evaluate_now (size2, &fblock);
7999
8000 /* Realloc expression. Note that the scalarizer uses desc.data
8001 in the array reference - (*desc.data)[<element>]. */
8002 gfc_init_block (&realloc_block);
8003 tmp = build_call_expr_loc (input_location,
8004 builtin_decl_explicit (BUILT_IN_REALLOC), 2,
8005 fold_convert (pvoid_type_node, array1),
8006 size2);
8007 gfc_conv_descriptor_data_set (&realloc_block,
8008 desc, tmp);
8009 realloc_expr = gfc_finish_block (&realloc_block);
8010
8011 /* Only reallocate if sizes are different. */
8012 tmp = build3_v (COND_EXPR, neq_size, realloc_expr,
8013 build_empty_stmt (input_location));
8014 realloc_expr = tmp;
8015
8016
8017 /* Malloc expression. */
8018 gfc_init_block (&alloc_block);
8019 tmp = build_call_expr_loc (input_location,
8020 builtin_decl_explicit (BUILT_IN_MALLOC),
8021 1, size2);
8022 gfc_conv_descriptor_data_set (&alloc_block,
8023 desc, tmp);
8024 tmp = gfc_conv_descriptor_dtype (desc);
8025 gfc_add_modify (&alloc_block, tmp, gfc_get_dtype (TREE_TYPE (desc)));
8026 alloc_expr = gfc_finish_block (&alloc_block);
8027
8028 /* Malloc if not allocated; realloc otherwise. */
8029 tmp = build_int_cst (TREE_TYPE (array1), 0);
8030 cond = fold_build2_loc (input_location, EQ_EXPR,
8031 boolean_type_node,
8032 array1, tmp);
8033 tmp = build3_v (COND_EXPR, cond, alloc_expr, realloc_expr);
8034 gfc_add_expr_to_block (&fblock, tmp);
8035
8036 /* Make sure that the scalarizer data pointer is updated. */
8037 if (linfo->data
8038 && TREE_CODE (linfo->data) == VAR_DECL)
8039 {
8040 tmp = gfc_conv_descriptor_data_get (desc);
8041 gfc_add_modify (&fblock, linfo->data, tmp);
8042 }
8043
8044 /* Add the exit label. */
8045 tmp = build1_v (LABEL_EXPR, jump_label2);
8046 gfc_add_expr_to_block (&fblock, tmp);
8047
8048 return gfc_finish_block (&fblock);
8049 }
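
/* An illustrative Fortran sketch (invented names) of the three cases the
   malloc/realloc logic above distinguishes:

       integer, allocatable :: a(:)
       a = [1, 2, 3]        ! unallocated    -> malloc, bounds set
       a = [4, 5, 6]        ! same shape     -> jump past the (re)alloc code
       a = [1, 2, 3, 4]     ! shape differs  -> realloc to the new size  */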
8050
8051
8052 /* NULLIFY an allocatable/pointer array on function entry, free it on exit.
8053 Do likewise, recursively if necessary, with the allocatable components of
8054 derived types. */
8055
8056 void
8057 gfc_trans_deferred_array (gfc_symbol * sym, gfc_wrapped_block * block)
8058 {
8059 tree type;
8060 tree tmp;
8061 tree descriptor;
8062 stmtblock_t init;
8063 stmtblock_t cleanup;
8064 locus loc;
8065 int rank;
8066 bool sym_has_alloc_comp;
8067
8068 sym_has_alloc_comp = (sym->ts.type == BT_DERIVED
8069 || sym->ts.type == BT_CLASS)
8070 && sym->ts.u.derived->attr.alloc_comp;
8071
8072 /* Make sure the frontend gets these right. */
8073 if (!(sym->attr.pointer || sym->attr.allocatable || sym_has_alloc_comp))
8074 fatal_error ("Possible front-end bug: Deferred array size without pointer, "
8075 "allocatable attribute or derived type without allocatable "
8076 "components.");
8077
8078 gfc_save_backend_locus (&loc);
8079 gfc_set_backend_locus (&sym->declared_at);
8080 gfc_init_block (&init);
8081
8082 gcc_assert (TREE_CODE (sym->backend_decl) == VAR_DECL
8083 || TREE_CODE (sym->backend_decl) == PARM_DECL);
8084
8085 if (sym->ts.type == BT_CHARACTER
8086 && !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
8087 {
8088 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
8089 gfc_trans_vla_type_sizes (sym, &init);
8090 }
8091
8092 /* Dummy, use-associated and result variables don't need anything special. */
8093 if (sym->attr.dummy || sym->attr.use_assoc || sym->attr.result)
8094 {
8095 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
8096 gfc_restore_backend_locus (&loc);
8097 return;
8098 }
8099
8100 descriptor = sym->backend_decl;
8101
8102 /* Although static, derived types with default initializers and
8103 allocatable components must not be nulled wholesale; instead they
8104 are treated component by component. */
8105 if (TREE_STATIC (descriptor) && !sym_has_alloc_comp)
8106 {
8107 /* SAVEd variables are not freed on exit. */
8108 gfc_trans_static_array_pointer (sym);
8109
8110 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
8111 gfc_restore_backend_locus (&loc);
8112 return;
8113 }
8114
8115 /* Get the descriptor type. */
8116 type = TREE_TYPE (sym->backend_decl);
8117
8118 if (sym_has_alloc_comp && !(sym->attr.pointer || sym->attr.allocatable))
8119 {
8120 if (!sym->attr.save
8121 && !(TREE_STATIC (sym->backend_decl) && sym->attr.is_main_program))
8122 {
8123 if (sym->value == NULL
8124 || !gfc_has_default_initializer (sym->ts.u.derived))
8125 {
8126 rank = sym->as ? sym->as->rank : 0;
8127 tmp = gfc_nullify_alloc_comp (sym->ts.u.derived,
8128 descriptor, rank);
8129 gfc_add_expr_to_block (&init, tmp);
8130 }
8131 else
8132 gfc_init_default_dt (sym, &init, false);
8133 }
8134 }
8135 else if (!GFC_DESCRIPTOR_TYPE_P (type))
8136 {
8137 /* If the backend_decl is not a descriptor, we must have a pointer
8138 to one. */
8139 descriptor = build_fold_indirect_ref_loc (input_location,
8140 sym->backend_decl);
8141 type = TREE_TYPE (descriptor);
8142 }
8143
8144 /* NULLIFY the data pointer. */
8145 if (GFC_DESCRIPTOR_TYPE_P (type) && !sym->attr.save)
8146 gfc_conv_descriptor_data_set (&init, descriptor, null_pointer_node);
8147
8148 gfc_restore_backend_locus (&loc);
8149 gfc_init_block (&cleanup);
8150
8151 /* Allocatable arrays need to be freed when they go out of scope.
8152 The allocatable components of pointers must not be touched. */
8153 if (sym_has_alloc_comp && !(sym->attr.function || sym->attr.result)
8154 && !sym->attr.pointer && !sym->attr.save)
8155 {
8156 int rank;
8157 rank = sym->as ? sym->as->rank : 0;
8158 tmp = gfc_deallocate_alloc_comp (sym->ts.u.derived, descriptor, rank);
8159 gfc_add_expr_to_block (&cleanup, tmp);
8160 }
8161
8162 if (sym->attr.allocatable && (sym->attr.dimension || sym->attr.codimension)
8163 && !sym->attr.save && !sym->attr.result)
8164 {
8165 tmp = gfc_trans_dealloc_allocated (sym->backend_decl,
8166 sym->attr.codimension);
8167 gfc_add_expr_to_block (&cleanup, tmp);
8168 }
8169
8170 gfc_add_init_cleanup (block, gfc_finish_block (&init),
8171 gfc_finish_block (&cleanup));
8172 }
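
/* An illustrative Fortran sketch (invented names) of the entry/exit code
   gfc_trans_deferred_array produces for a local allocatable array:

       subroutine demo
         real, allocatable :: work(:,:)   ! entry: data pointer nullified
         allocate (work(10,10))
         ! ... use work ...
       end subroutine demo                ! exit: work freed automatically,
                                          ! as it is neither a dummy, SAVEd,
                                          ! use-associated nor the result  */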
8173
8174 /************ Expression Walking Functions ******************/
8175
8176 /* Walk a variable reference.
8177
8178 Possible extension - multiple component subscripts.
8179 x(:,:) = foo%a(:)%b(:)
8180 Transforms to
8181 forall (i=..., j=...)
8182 x(i,j) = foo%a(j)%b(i)
8183 end forall
8184 This adds a fair amount of complexity because you need to deal with more
8185 than one ref. Maybe handle in a similar manner to vector subscripts.
8186 Maybe not worth the effort. */
8187
8188
8189 static gfc_ss *
8190 gfc_walk_variable_expr (gfc_ss * ss, gfc_expr * expr)
8191 {
8192 gfc_ref *ref;
8193
8194 for (ref = expr->ref; ref; ref = ref->next)
8195 if (ref->type == REF_ARRAY && ref->u.ar.type != AR_ELEMENT)
8196 break;
8197
8198 return gfc_walk_array_ref (ss, expr, ref);
8199 }
8200
8201
8202 gfc_ss *
8203 gfc_walk_array_ref (gfc_ss * ss, gfc_expr * expr, gfc_ref * ref)
8204 {
8205 gfc_array_ref *ar;
8206 gfc_ss *newss;
8207 int n;
8208
8209 for (; ref; ref = ref->next)
8210 {
8211 if (ref->type == REF_SUBSTRING)
8212 {
8213 ss = gfc_get_scalar_ss (ss, ref->u.ss.start);
8214 ss = gfc_get_scalar_ss (ss, ref->u.ss.end);
8215 }
8216
8217 /* We're only interested in array sections from now on. */
8218 if (ref->type != REF_ARRAY)
8219 continue;
8220
8221 ar = &ref->u.ar;
8222
8223 switch (ar->type)
8224 {
8225 case AR_ELEMENT:
8226 for (n = ar->dimen - 1; n >= 0; n--)
8227 ss = gfc_get_scalar_ss (ss, ar->start[n]);
8228 break;
8229
8230 case AR_FULL:
8231 newss = gfc_get_array_ss (ss, expr, ar->as->rank, GFC_SS_SECTION);
8232 newss->info->data.array.ref = ref;
8233
8234 /* Make sure array is the same as array(:,:); this way
8235 we don't need to special-case all the time. */
8236 ar->dimen = ar->as->rank;
8237 for (n = 0; n < ar->dimen; n++)
8238 {
8239 ar->dimen_type[n] = DIMEN_RANGE;
8240
8241 gcc_assert (ar->start[n] == NULL);
8242 gcc_assert (ar->end[n] == NULL);
8243 gcc_assert (ar->stride[n] == NULL);
8244 }
8245 ss = newss;
8246 break;
8247
8248 case AR_SECTION:
8249 newss = gfc_get_array_ss (ss, expr, 0, GFC_SS_SECTION);
8250 newss->info->data.array.ref = ref;
8251
8252 /* We add SS chains for all the subscripts in the section. */
8253 for (n = 0; n < ar->dimen; n++)
8254 {
8255 gfc_ss *indexss;
8256
8257 switch (ar->dimen_type[n])
8258 {
8259 case DIMEN_ELEMENT:
8260 /* Add SS for elemental (scalar) subscripts. */
8261 gcc_assert (ar->start[n]);
8262 indexss = gfc_get_scalar_ss (gfc_ss_terminator, ar->start[n]);
8263 indexss->loop_chain = gfc_ss_terminator;
8264 newss->info->data.array.subscript[n] = indexss;
8265 break;
8266
8267 case DIMEN_RANGE:
8268 /* We don't add anything for sections; just remember this
8269 dimension for later. */
8270 newss->dim[newss->dimen] = n;
8271 newss->dimen++;
8272 break;
8273
8274 case DIMEN_VECTOR:
8275 /* Create a GFC_SS_VECTOR index in which we can store
8276 the vector's descriptor. */
8277 indexss = gfc_get_array_ss (gfc_ss_terminator, ar->start[n],
8278 1, GFC_SS_VECTOR);
8279 indexss->loop_chain = gfc_ss_terminator;
8280 newss->info->data.array.subscript[n] = indexss;
8281 newss->dim[newss->dimen] = n;
8282 newss->dimen++;
8283 break;
8284
8285 default:
8286 /* We should know what sort of section it is by now. */
8287 gcc_unreachable ();
8288 }
8289 }
8290 /* We should have at least one non-elemental dimension,
8291 unless we are creating a descriptor for a (scalar) coarray. */
8292 gcc_assert (newss->dimen > 0
8293 || newss->info->data.array.ref->u.ar.as->corank > 0);
8294 ss = newss;
8295 break;
8296
8297 default:
8298 /* We should know what sort of array reference it is by now. */
8299 gcc_unreachable ();
8300 }
8301
8302 }
8303 return ss;
8304 }
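
/* An illustrative sketch (invented names) of how the reference kinds above
   map onto SS entries, for an array A(10,10) and an integer vector V:

       a            ! AR_FULL:    one array SS, treated like a(:,:)
       a(1, :)      ! AR_SECTION: scalar SS for the subscript 1, one
                    !             scalarizer dimension for ':'
       a(v, 1)      ! AR_SECTION: a GFC_SS_VECTOR entry holds v's descriptor
                    !             for the vector subscript
       a(1, 2)      ! AR_ELEMENT: only scalar SS entries for 1 and 2  */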
8305
8306
8307 /* Walk an expression operator. If only one operand of a binary expression is
8308 scalar, we must also add the scalar term to the SS chain. */
8309
8310 static gfc_ss *
8311 gfc_walk_op_expr (gfc_ss * ss, gfc_expr * expr)
8312 {
8313 gfc_ss *head;
8314 gfc_ss *head2;
8315
8316 head = gfc_walk_subexpr (ss, expr->value.op.op1);
8317 if (expr->value.op.op2 == NULL)
8318 head2 = head;
8319 else
8320 head2 = gfc_walk_subexpr (head, expr->value.op.op2);
8321
8322 /* All operands are scalar. Pass back and let the caller deal with it. */
8323 if (head2 == ss)
8324 return head2;
8325
8326 /* All operands require scalarization. */
8327 if (head != ss && (expr->value.op.op2 == NULL || head2 != head))
8328 return head2;
8329
8330 /* One of the operands needs scalarization; the other is scalar.
8331 Create a gfc_ss for the scalar expression. */
8332 if (head == ss)
8333 {
8334 /* First operand is scalar. We build the chain in reverse order, so
8335 add the scalar SS after the second operand. */
8336 head = head2;
8337 while (head && head->next != ss)
8338 head = head->next;
8339 /* Check we haven't somehow broken the chain. */
8340 gcc_assert (head);
8341 head->next = gfc_get_scalar_ss (ss, expr->value.op.op1);
8342 }
8343 else /* head2 == head */
8344 {
8345 gcc_assert (head2 == head);
8346 /* Second operand is scalar. */
8347 head2 = gfc_get_scalar_ss (head2, expr->value.op.op2);
8348 }
8349
8350 return head2;
8351 }
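
/* An illustrative sketch (invented names): for

       c = b + s    ! b and c rank-1 arrays, s a scalar

   walking 'b + s' yields the SS for b; a scalar SS for s is appended here
   so the scalarizer pre-evaluates s once, outside the loop.  */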
8352
8353
8354 /* Reverse a SS chain. */
8355
8356 gfc_ss *
8357 gfc_reverse_ss (gfc_ss * ss)
8358 {
8359 gfc_ss *next;
8360 gfc_ss *head;
8361
8362 gcc_assert (ss != NULL);
8363
8364 head = gfc_ss_terminator;
8365 while (ss != gfc_ss_terminator)
8366 {
8367 next = ss->next;
8368 /* Check we didn't somehow break the chain. */
8369 gcc_assert (next != NULL);
8370 ss->next = head;
8371 head = ss;
8372 ss = next;
8373 }
8374
8375 return (head);
8376 }
8377
8378
8379 /* Walk the arguments of an elemental function.
8380 PROC_EXPR is used to check whether an argument is permitted to be absent. If
8381 it is NULL, we don't do the check and the argument is assumed to be present.
8382 */
8383
8384 gfc_ss *
8385 gfc_walk_elemental_function_args (gfc_ss * ss, gfc_actual_arglist *arg,
8386 gfc_expr *proc_expr, gfc_ss_type type)
8387 {
8388 gfc_formal_arglist *dummy_arg;
8389 int scalar;
8390 gfc_ss *head;
8391 gfc_ss *tail;
8392 gfc_ss *newss;
8393
8394 head = gfc_ss_terminator;
8395 tail = NULL;
8396
8397 if (proc_expr)
8398 {
8399 gfc_ref *ref;
8400
8401 /* Normal procedure case. */
8402 dummy_arg = proc_expr->symtree->n.sym->formal;
8403
8404 /* Typebound procedure case. */
8405 for (ref = proc_expr->ref; ref; ref = ref->next)
8406 {
8407 if (ref->type == REF_COMPONENT
8408 && ref->u.c.component->attr.proc_pointer
8409 && ref->u.c.component->ts.interface)
8410 dummy_arg = ref->u.c.component->ts.interface->formal;
8411 else
8412 dummy_arg = NULL;
8413 }
8414 }
8415 else
8416 dummy_arg = NULL;
8417
8418 scalar = 1;
8419 for (; arg; arg = arg->next)
8420 {
8421 if (!arg->expr || arg->expr->expr_type == EXPR_NULL)
8422 continue;
8423
8424 newss = gfc_walk_subexpr (head, arg->expr);
8425 if (newss == head)
8426 {
8427 /* Scalar argument. */
8428 gcc_assert (type == GFC_SS_SCALAR || type == GFC_SS_REFERENCE);
8429 newss = gfc_get_scalar_ss (head, arg->expr);
8430 newss->info->type = type;
8431
8432 if (dummy_arg != NULL
8433 && dummy_arg->sym->attr.optional
8434 && arg->expr->expr_type == EXPR_VARIABLE
8435 && (gfc_expr_attr (arg->expr).optional
8436 || gfc_expr_attr (arg->expr).allocatable
8437 || gfc_expr_attr (arg->expr).pointer))
8438 newss->info->data.scalar.can_be_null_ref = true;
8439 }
8440 else
8441 scalar = 0;
8442
8443 head = newss;
8444 if (!tail)
8445 {
8446 tail = head;
8447 while (tail->next != gfc_ss_terminator)
8448 tail = tail->next;
8449 }
8450
8451 if (dummy_arg != NULL)
8452 dummy_arg = dummy_arg->next;
8453 }
8454
8455 if (scalar)
8456 {
8457 /* If all the arguments are scalar we don't need the argument SS. */
8458 gfc_free_ss_chain (head);
8459 /* Pass it back. */
8460 return ss;
8461 }
8462
8463 /* Add it onto the existing chain. */
8464 tail->next = ss;
8465 return head;
8466 }
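
/* An illustrative Fortran sketch (invented names) of the can_be_null_ref
   case handled above:

       elemental real function f (x, y)
         real, intent(in)           :: x
         real, intent(in), optional :: y
       end function f
       ...
       real :: a(10), b(10)
       real, allocatable :: s     ! scalar, possibly unallocated
       b = f (a, s)   ! a yields an array SS; s yields a scalar SS whose
                      ! can_be_null_ref flag is set, because the dummy y is
                      ! optional and the actual argument may be absent  */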
8467
8468
8469 /* Walk a function call. Scalar functions are passed back, and taken out of
8470 scalarization loops. For elemental functions we walk their arguments.
8471 The result of functions returning arrays is stored in a temporary outside
8472 the loop, so that the function is only called once. Hence we do not need
8473 to walk their arguments. */
8474
8475 static gfc_ss *
8476 gfc_walk_function_expr (gfc_ss * ss, gfc_expr * expr)
8477 {
8478 gfc_intrinsic_sym *isym;
8479 gfc_symbol *sym;
8480 gfc_component *comp = NULL;
8481
8482 isym = expr->value.function.isym;
8483
8484 /* Handle intrinsic functions separately. */
8485 if (isym)
8486 return gfc_walk_intrinsic_function (ss, expr, isym);
8487
8488 sym = expr->value.function.esym;
8489 if (!sym)
8490 sym = expr->symtree->n.sym;
8491
8492 /* A function that returns arrays. */
8493 gfc_is_proc_ptr_comp (expr, &comp);
8494 if ((!comp && gfc_return_by_reference (sym) && sym->result->attr.dimension)
8495 || (comp && comp->attr.dimension))
8496 return gfc_get_array_ss (ss, expr, expr->rank, GFC_SS_FUNCTION);
8497
8498 /* Walk the parameters of an elemental function. For now we always pass
8499 by reference. */
8500 if (sym->attr.elemental || (comp && comp->attr.elemental))
8501 return gfc_walk_elemental_function_args (ss, expr->value.function.actual,
8502 expr, GFC_SS_REFERENCE);
8503
8504 /* Scalar functions are OK as these are evaluated outside the scalarization
8505 loop. Pass back and let the caller deal with it. */
8506 return ss;
8507 }
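
/* An illustrative sketch (the function names are invented) of how the
   cases above are dispatched:

       y = matmul (a, b)        ! intrinsic -> gfc_walk_intrinsic_function
       y = array_valued_fn (x)  ! GFC_SS_FUNCTION; the result lands in a
                                !   temporary and the arguments are not walked
       y = elemental_fn (x)     ! elemental -> the arguments are walked
       t = scalar_fn (x)        ! scalar    -> ss is passed back unchanged  */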
8508
8509
8510 /* An array temporary is constructed for array constructors. */
8511
8512 static gfc_ss *
8513 gfc_walk_array_constructor (gfc_ss * ss, gfc_expr * expr)
8514 {
8515 return gfc_get_array_ss (ss, expr, expr->rank, GFC_SS_CONSTRUCTOR);
8516 }
8517
8518
8519 /* Walk an expression. Add walked expressions to the head of the SS chain.
8520 A wholly scalar expression will not be added. */
8521
8522 gfc_ss *
8523 gfc_walk_subexpr (gfc_ss * ss, gfc_expr * expr)
8524 {
8525 gfc_ss *head;
8526
8527 switch (expr->expr_type)
8528 {
8529 case EXPR_VARIABLE:
8530 head = gfc_walk_variable_expr (ss, expr);
8531 return head;
8532
8533 case EXPR_OP:
8534 head = gfc_walk_op_expr (ss, expr);
8535 return head;
8536
8537 case EXPR_FUNCTION:
8538 head = gfc_walk_function_expr (ss, expr);
8539 return head;
8540
8541 case EXPR_CONSTANT:
8542 case EXPR_NULL:
8543 case EXPR_STRUCTURE:
8544 /* Pass back and let the caller deal with it. */
8545 break;
8546
8547 case EXPR_ARRAY:
8548 head = gfc_walk_array_constructor (ss, expr);
8549 return head;
8550
8551 case EXPR_SUBSTRING:
8552 /* Pass back and let the caller deal with it. */
8553 break;
8554
8555 default:
8556 internal_error ("bad expression type during walk (%d)",
8557 expr->expr_type);
8558 }
8559 return ss;
8560 }
8561
8562
8563 /* Entry point for expression walking.
8564 A return value equal to the passed chain means this is
8565 a scalar expression. It is up to the caller to take whatever action is
8566 necessary to translate these. */
8567
8568 gfc_ss *
8569 gfc_walk_expr (gfc_expr * expr)
8570 {
8571 gfc_ss *res;
8572
8573 res = gfc_walk_subexpr (gfc_ss_terminator, expr);
8574 return gfc_reverse_ss (res);
8575 }