1 /* Array translation routines
2 Copyright (C) 2002-2015 Free Software Foundation, Inc.
3 Contributed by Paul Brook <paul@nowt.org>
4 and Steven Bosscher <s.bosscher@student.tudelft.nl>
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22 /* trans-array.c-- Various array related code, including scalarization,
23 allocation, initialization and other support routines. */
24
25 /* How the scalarizer works.
26 In gfortran, array expressions use the same core routines as scalar
27 expressions.
28 First, a Scalarization State (SS) chain is built. This is done by walking
29 the expression tree, and building a linear list of the terms in the
30 expression. As the tree is walked, scalar subexpressions are translated.
31
32 The scalarization parameters are stored in a gfc_loopinfo structure.
33 First the start and stride of each term is calculated by
34 gfc_conv_ss_startstride. During this process the expressions for the array
35 descriptors and data pointers are also translated.
36
37 If the expression is an assignment, we must then resolve any dependencies.
38 In Fortran all the rhs values of an assignment must be evaluated before
39 any assignments take place. This can require a temporary array to store the
40 values. We also require a temporary when we are passing array expressions
41 or vector subscripts as procedure parameters.
42
43 Array sections are passed without copying to a temporary. These use the
44 scalarizer to determine the shape of the section. The flag
45 loop->array_parameter tells the scalarizer that the actual values and loop
46 variables will not be required.
47
48 The function gfc_conv_loop_setup generates the scalarization setup code.
49 It determines the range of the scalarizing loop variables. If a temporary
50 is required, this is created and initialized. Code for scalar expressions
51 taken outside the loop is also generated at this time. Next the offset and
52 scaling required to translate from loop variables to array indices for each
53 term is calculated.
54
55 A call to gfc_start_scalarized_body marks the start of the scalarized
56 expression. This creates a scope and declares the loop variables. Before
57 calling this, gfc_mark_ss_chain_used must be used to indicate which terms
58 will be used inside this loop.
59
60 The scalar gfc_conv_* functions are then used to build the main body of the
61 scalarization loop. Scalarization loop variables and precalculated scalar
62 values are automatically substituted. Note that gfc_advance_se_ss_chain
63 must be used, rather than changing the se->ss directly.
64
65 For assignment expressions requiring a temporary, two sub loops are
66 generated. The first stores the result of the expression in the temporary,
67 the second copies it to the result. A call to
68 gfc_trans_scalarized_loop_boundary marks the end of the main loop code and
69 the start of the copying loop. The temporary may be less than full rank.
70
71 Finally gfc_trans_scalarizing_loops is called to generate the implicit do
72 loops. The loops are added to the pre chain of the loopinfo. The post
73 chain may still contain cleanup code.
74
75 After the loop code has been added into its parent scope gfc_cleanup_loop
76 is called to free all the SS allocated by the scalarizer. */
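/* A minimal sketch of a typical caller driving the scalarizer for one
   array expression EXPR, following the steps described above; compare
   gfc_trans_array_constructor_subarray later in this file.  The local
   names here are illustrative only:

       gfc_init_se (&se, NULL);
       ss = gfc_walk_expr (expr);
       gfc_init_loopinfo (&loop);
       gfc_add_ss_to_loop (&loop, ss);
       gfc_conv_ss_startstride (&loop);
       gfc_conv_loop_setup (&loop, &expr->where);
       gfc_mark_ss_chain_used (ss, 1);
       gfc_start_scalarized_body (&loop, &body);
       gfc_copy_loopinfo_to_se (&se, &loop);
       se.ss = ss;
       ... generate the per-element code with the scalar gfc_conv_*
           routines, using gfc_advance_se_ss_chain between terms ...
       gfc_trans_scalarizing_loops (&loop, &body);
       gfc_add_block_to_block (&loop.pre, &loop.post);
       gfc_cleanup_loop (&loop);  */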
77
78 #include "config.h"
79 #include "system.h"
80 #include "coretypes.h"
81 #include "options.h"
82 #include "tree.h"
83 #include "gfortran.h"
84 #include "gimple-expr.h"
85 #include "trans.h"
86 #include "fold-const.h"
87 #include "constructor.h"
88 #include "trans-types.h"
89 #include "trans-array.h"
90 #include "trans-const.h"
91 #include "dependency.h"
92
93 static bool gfc_get_array_constructor_size (mpz_t *, gfc_constructor_base);
94
95 /* The contents of this structure aren't actually used, just the address. */
96 static gfc_ss gfc_ss_terminator_var;
97 gfc_ss * const gfc_ss_terminator = &gfc_ss_terminator_var;
98
99
100 static tree
101 gfc_array_dataptr_type (tree desc)
102 {
103 return (GFC_TYPE_ARRAY_DATAPTR_TYPE (TREE_TYPE (desc)));
104 }
105
106
107 /* Build expressions to access the members of an array descriptor.
108 It's surprisingly easy to mess up here, so never access
109 an array descriptor by "brute force"; always use these
110 functions. This also avoids problems if we change the format
111 of an array descriptor.
112
113 To understand these magic numbers, look at the comments
114 before gfc_build_array_type() in trans-types.c.
115
116 The code within these defines should be the only code which knows the format
117 of an array descriptor.
118
119 Any code that just needs to obtain the bounds of an array should use
120 gfc_conv_array_* rather than the following functions, as those will return
121 known constant values and work with arrays which do not have descriptors.
122
123 Don't forget to #undef these! */
124
125 #define DATA_FIELD 0
126 #define OFFSET_FIELD 1
127 #define DTYPE_FIELD 2
128 #define DIMENSION_FIELD 3
129 #define CAF_TOKEN_FIELD 4
130
131 #define STRIDE_SUBFIELD 0
132 #define LBOUND_SUBFIELD 1
133 #define UBOUND_SUBFIELD 2
134
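/* For instance, assuming DESC is an expression with a descriptor type and
   BLOCK is the enclosing stmtblock_t, a caller reads and updates descriptor
   members only through the helpers below (compare
   gfc_conv_shift_descriptor_lbound further down):

       lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]);
       gfc_conv_descriptor_offset_set (block, desc, gfc_index_zero_node);

   rather than by building COMPONENT_REFs with the field indices above.  */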
135 /* This provides READ-ONLY access to the data field. The field itself
136 doesn't have the proper type. */
137
138 tree
139 gfc_conv_descriptor_data_get (tree desc)
140 {
141 tree field, type, t;
142
143 type = TREE_TYPE (desc);
144 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
145
146 field = TYPE_FIELDS (type);
147 gcc_assert (DATA_FIELD == 0);
148
149 t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
150 field, NULL_TREE);
151 t = fold_convert (GFC_TYPE_ARRAY_DATAPTR_TYPE (type), t);
152
153 return t;
154 }
155
156 /* This provides WRITE access to the data field.
157
158 VALUE is converted to the type of the data field before being stored.
159
160 See gfc_conv_descriptor_data_get above for READ-ONLY access and
161 gfc_conv_descriptor_data_addr below for obtaining the address of
162 the field. */
163
164 void
165 gfc_conv_descriptor_data_set (stmtblock_t *block, tree desc, tree value)
166 {
167 tree field, type, t;
168
169 type = TREE_TYPE (desc);
170 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
171
172 field = TYPE_FIELDS (type);
173 gcc_assert (DATA_FIELD == 0);
174
175 t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
176 field, NULL_TREE);
177 gfc_add_modify (block, t, fold_convert (TREE_TYPE (field), value));
178 }
179
180
181 /* This provides address access to the data field. It should only be
182 used by array allocation, which passes it on to the runtime. */
183
184 tree
185 gfc_conv_descriptor_data_addr (tree desc)
186 {
187 tree field, type, t;
188
189 type = TREE_TYPE (desc);
190 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
191
192 field = TYPE_FIELDS (type);
193 gcc_assert (DATA_FIELD == 0);
194
195 t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
196 field, NULL_TREE);
197 return gfc_build_addr_expr (NULL_TREE, t);
198 }
199
200 static tree
201 gfc_conv_descriptor_offset (tree desc)
202 {
203 tree type;
204 tree field;
205
206 type = TREE_TYPE (desc);
207 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
208
209 field = gfc_advance_chain (TYPE_FIELDS (type), OFFSET_FIELD);
210 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
211
212 return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
213 desc, field, NULL_TREE);
214 }
215
216 tree
217 gfc_conv_descriptor_offset_get (tree desc)
218 {
219 return gfc_conv_descriptor_offset (desc);
220 }
221
222 void
223 gfc_conv_descriptor_offset_set (stmtblock_t *block, tree desc,
224 tree value)
225 {
226 tree t = gfc_conv_descriptor_offset (desc);
227 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
228 }
229
230
231 tree
232 gfc_conv_descriptor_dtype (tree desc)
233 {
234 tree field;
235 tree type;
236
237 type = TREE_TYPE (desc);
238 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
239
240 field = gfc_advance_chain (TYPE_FIELDS (type), DTYPE_FIELD);
241 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
242
243 return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
244 desc, field, NULL_TREE);
245 }
246
247
248 tree
249 gfc_conv_descriptor_rank (tree desc)
250 {
251 tree tmp;
252 tree dtype;
253
254 dtype = gfc_conv_descriptor_dtype (desc);
255 tmp = build_int_cst (TREE_TYPE (dtype), GFC_DTYPE_RANK_MASK);
256 tmp = fold_build2_loc (input_location, BIT_AND_EXPR, TREE_TYPE (dtype),
257 dtype, tmp);
258 return fold_convert (gfc_get_int_type (gfc_default_integer_kind), tmp);
259 }
260
261
262 tree
263 gfc_get_descriptor_dimension (tree desc)
264 {
265 tree type, field;
266
267 type = TREE_TYPE (desc);
268 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
269
270 field = gfc_advance_chain (TYPE_FIELDS (type), DIMENSION_FIELD);
271 gcc_assert (field != NULL_TREE
272 && TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE
273 && TREE_CODE (TREE_TYPE (TREE_TYPE (field))) == RECORD_TYPE);
274
275 return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
276 desc, field, NULL_TREE);
277 }
278
279
280 static tree
281 gfc_conv_descriptor_dimension (tree desc, tree dim)
282 {
283 tree tmp;
284
285 tmp = gfc_get_descriptor_dimension (desc);
286
287 return gfc_build_array_ref (tmp, dim, NULL);
288 }
289
290
291 tree
292 gfc_conv_descriptor_token (tree desc)
293 {
294 tree type;
295 tree field;
296
297 type = TREE_TYPE (desc);
298 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
299 gcc_assert (flag_coarray == GFC_FCOARRAY_LIB);
300 field = gfc_advance_chain (TYPE_FIELDS (type), CAF_TOKEN_FIELD);
301
302 /* Should be a restricted pointer - except in the finalization wrapper. */
303 gcc_assert (field != NULL_TREE
304 && (TREE_TYPE (field) == prvoid_type_node
305 || TREE_TYPE (field) == pvoid_type_node));
306
307 return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
308 desc, field, NULL_TREE);
309 }
310
311
312 static tree
313 gfc_conv_descriptor_stride (tree desc, tree dim)
314 {
315 tree tmp;
316 tree field;
317
318 tmp = gfc_conv_descriptor_dimension (desc, dim);
319 field = TYPE_FIELDS (TREE_TYPE (tmp));
320 field = gfc_advance_chain (field, STRIDE_SUBFIELD);
321 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
322
323 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
324 tmp, field, NULL_TREE);
325 return tmp;
326 }
327
328 tree
329 gfc_conv_descriptor_stride_get (tree desc, tree dim)
330 {
331 tree type = TREE_TYPE (desc);
332 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
333 if (integer_zerop (dim)
334 && (GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ALLOCATABLE
335 || GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ASSUMED_SHAPE_CONT
336 || GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ASSUMED_RANK_CONT
337 || GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_POINTER_CONT))
338 return gfc_index_one_node;
339
340 return gfc_conv_descriptor_stride (desc, dim);
341 }
342
343 void
344 gfc_conv_descriptor_stride_set (stmtblock_t *block, tree desc,
345 tree dim, tree value)
346 {
347 tree t = gfc_conv_descriptor_stride (desc, dim);
348 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
349 }
350
351 static tree
352 gfc_conv_descriptor_lbound (tree desc, tree dim)
353 {
354 tree tmp;
355 tree field;
356
357 tmp = gfc_conv_descriptor_dimension (desc, dim);
358 field = TYPE_FIELDS (TREE_TYPE (tmp));
359 field = gfc_advance_chain (field, LBOUND_SUBFIELD);
360 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
361
362 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
363 tmp, field, NULL_TREE);
364 return tmp;
365 }
366
367 tree
368 gfc_conv_descriptor_lbound_get (tree desc, tree dim)
369 {
370 return gfc_conv_descriptor_lbound (desc, dim);
371 }
372
373 void
374 gfc_conv_descriptor_lbound_set (stmtblock_t *block, tree desc,
375 tree dim, tree value)
376 {
377 tree t = gfc_conv_descriptor_lbound (desc, dim);
378 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
379 }
380
381 static tree
382 gfc_conv_descriptor_ubound (tree desc, tree dim)
383 {
384 tree tmp;
385 tree field;
386
387 tmp = gfc_conv_descriptor_dimension (desc, dim);
388 field = TYPE_FIELDS (TREE_TYPE (tmp));
389 field = gfc_advance_chain (field, UBOUND_SUBFIELD);
390 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
391
392 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
393 tmp, field, NULL_TREE);
394 return tmp;
395 }
396
397 tree
398 gfc_conv_descriptor_ubound_get (tree desc, tree dim)
399 {
400 return gfc_conv_descriptor_ubound (desc, dim);
401 }
402
403 void
404 gfc_conv_descriptor_ubound_set (stmtblock_t *block, tree desc,
405 tree dim, tree value)
406 {
407 tree t = gfc_conv_descriptor_ubound (desc, dim);
408 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
409 }
410
411 /* Build a null array descriptor constructor. */
412
413 tree
414 gfc_build_null_descriptor (tree type)
415 {
416 tree field;
417 tree tmp;
418
419 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
420 gcc_assert (DATA_FIELD == 0);
421 field = TYPE_FIELDS (type);
422
423 /* Set a NULL data pointer. */
424 tmp = build_constructor_single (type, field, null_pointer_node);
425 TREE_CONSTANT (tmp) = 1;
426 /* All other fields are ignored. */
427
428 return tmp;
429 }
430
431
432 /* Modify a descriptor such that the lbound of a given dimension is the value
433 specified. This also updates ubound and offset accordingly. */
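/* Concretely, with DIFF = NEW_LBOUND - LBOUND (see the function body):

       ubound += DIFF;
       offset -= DIFF * stride;
       lbound  = NEW_LBOUND;  */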
434
435 void
436 gfc_conv_shift_descriptor_lbound (stmtblock_t* block, tree desc,
437 int dim, tree new_lbound)
438 {
439 tree offs, ubound, lbound, stride;
440 tree diff, offs_diff;
441
442 new_lbound = fold_convert (gfc_array_index_type, new_lbound);
443
444 offs = gfc_conv_descriptor_offset_get (desc);
445 lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]);
446 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]);
447 stride = gfc_conv_descriptor_stride_get (desc, gfc_rank_cst[dim]);
448
449 /* Get the difference (new - old) by which to shift the bounds and offset. */
450 diff = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
451 new_lbound, lbound);
452
453 /* Shift ubound and offset accordingly. This has to be done before
454 updating the lbound, as they depend on the lbound expression! */
455 ubound = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
456 ubound, diff);
457 gfc_conv_descriptor_ubound_set (block, desc, gfc_rank_cst[dim], ubound);
458 offs_diff = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
459 diff, stride);
460 offs = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
461 offs, offs_diff);
462 gfc_conv_descriptor_offset_set (block, desc, offs);
463
464 /* Finally set lbound to value we want. */
465 gfc_conv_descriptor_lbound_set (block, desc, gfc_rank_cst[dim], new_lbound);
466 }
467
468
469 /* Cleanup those #defines. */
470
471 #undef DATA_FIELD
472 #undef OFFSET_FIELD
473 #undef DTYPE_FIELD
474 #undef DIMENSION_FIELD
475 #undef CAF_TOKEN_FIELD
476 #undef STRIDE_SUBFIELD
477 #undef LBOUND_SUBFIELD
478 #undef UBOUND_SUBFIELD
479
480
481 /* Mark a SS chain as used. Flags specifies in which loops the SS is used.
482 flags & 1 = Main loop body.
483 flags & 2 = temp copy loop. */
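/* For example, gfc_mark_ss_chain_used (ss, 1) marks a chain used only in
   the main loop body, while a FLAGS value of 3 marks it for both the main
   body and the temporary-copy loop.  */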
484
485 void
486 gfc_mark_ss_chain_used (gfc_ss * ss, unsigned flags)
487 {
488 for (; ss != gfc_ss_terminator; ss = ss->next)
489 ss->info->useflags = flags;
490 }
491
492
493 /* Free a gfc_ss chain. */
494
495 void
496 gfc_free_ss_chain (gfc_ss * ss)
497 {
498 gfc_ss *next;
499
500 while (ss != gfc_ss_terminator)
501 {
502 gcc_assert (ss != NULL);
503 next = ss->next;
504 gfc_free_ss (ss);
505 ss = next;
506 }
507 }
508
509
510 static void
511 free_ss_info (gfc_ss_info *ss_info)
512 {
513 int n;
514
515 ss_info->refcount--;
516 if (ss_info->refcount > 0)
517 return;
518
519 gcc_assert (ss_info->refcount == 0);
520
521 switch (ss_info->type)
522 {
523 case GFC_SS_SECTION:
524 for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
525 if (ss_info->data.array.subscript[n])
526 gfc_free_ss_chain (ss_info->data.array.subscript[n]);
527 break;
528
529 default:
530 break;
531 }
532
533 free (ss_info);
534 }
535
536
537 /* Free a SS. */
538
539 void
540 gfc_free_ss (gfc_ss * ss)
541 {
542 free_ss_info (ss->info);
543 free (ss);
544 }
545
546
547 /* Creates and initializes an array type gfc_ss struct. */
548
549 gfc_ss *
550 gfc_get_array_ss (gfc_ss *next, gfc_expr *expr, int dimen, gfc_ss_type type)
551 {
552 gfc_ss *ss;
553 gfc_ss_info *ss_info;
554 int i;
555
556 ss_info = gfc_get_ss_info ();
557 ss_info->refcount++;
558 ss_info->type = type;
559 ss_info->expr = expr;
560
561 ss = gfc_get_ss ();
562 ss->info = ss_info;
563 ss->next = next;
564 ss->dimen = dimen;
565 for (i = 0; i < ss->dimen; i++)
566 ss->dim[i] = i;
567
568 return ss;
569 }
570
571
572 /* Creates and initializes a temporary type gfc_ss struct. */
573
574 gfc_ss *
575 gfc_get_temp_ss (tree type, tree string_length, int dimen)
576 {
577 gfc_ss *ss;
578 gfc_ss_info *ss_info;
579 int i;
580
581 ss_info = gfc_get_ss_info ();
582 ss_info->refcount++;
583 ss_info->type = GFC_SS_TEMP;
584 ss_info->string_length = string_length;
585 ss_info->data.temp.type = type;
586
587 ss = gfc_get_ss ();
588 ss->info = ss_info;
589 ss->next = gfc_ss_terminator;
590 ss->dimen = dimen;
591 for (i = 0; i < ss->dimen; i++)
592 ss->dim[i] = i;
593
594 return ss;
595 }
596
597
598 /* Creates and initializes a scalar type gfc_ss struct. */
599
600 gfc_ss *
601 gfc_get_scalar_ss (gfc_ss *next, gfc_expr *expr)
602 {
603 gfc_ss *ss;
604 gfc_ss_info *ss_info;
605
606 ss_info = gfc_get_ss_info ();
607 ss_info->refcount++;
608 ss_info->type = GFC_SS_SCALAR;
609 ss_info->expr = expr;
610
611 ss = gfc_get_ss ();
612 ss->info = ss_info;
613 ss->next = next;
614
615 return ss;
616 }
617
618
619 /* Free all the SS associated with a loop. */
620
621 void
622 gfc_cleanup_loop (gfc_loopinfo * loop)
623 {
624 gfc_loopinfo *loop_next, **ploop;
625 gfc_ss *ss;
626 gfc_ss *next;
627
628 ss = loop->ss;
629 while (ss != gfc_ss_terminator)
630 {
631 gcc_assert (ss != NULL);
632 next = ss->loop_chain;
633 gfc_free_ss (ss);
634 ss = next;
635 }
636
637 /* Remove reference to self in the parent loop. */
638 if (loop->parent)
639 for (ploop = &loop->parent->nested; *ploop; ploop = &(*ploop)->next)
640 if (*ploop == loop)
641 {
642 *ploop = loop->next;
643 break;
644 }
645
646 /* Free non-freed nested loops. */
647 for (loop = loop->nested; loop; loop = loop_next)
648 {
649 loop_next = loop->next;
650 gfc_cleanup_loop (loop);
651 free (loop);
652 }
653 }
654
655
656 static void
657 set_ss_loop (gfc_ss *ss, gfc_loopinfo *loop)
658 {
659 int n;
660
661 for (; ss != gfc_ss_terminator; ss = ss->next)
662 {
663 ss->loop = loop;
664
665 if (ss->info->type == GFC_SS_SCALAR
666 || ss->info->type == GFC_SS_REFERENCE
667 || ss->info->type == GFC_SS_TEMP)
668 continue;
669
670 for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
671 if (ss->info->data.array.subscript[n] != NULL)
672 set_ss_loop (ss->info->data.array.subscript[n], loop);
673 }
674 }
675
676
677 /* Associate a SS chain with a loop. */
678
679 void
680 gfc_add_ss_to_loop (gfc_loopinfo * loop, gfc_ss * head)
681 {
682 gfc_ss *ss;
683 gfc_loopinfo *nested_loop;
684
685 if (head == gfc_ss_terminator)
686 return;
687
688 set_ss_loop (head, loop);
689
690 ss = head;
691 for (; ss && ss != gfc_ss_terminator; ss = ss->next)
692 {
693 if (ss->nested_ss)
694 {
695 nested_loop = ss->nested_ss->loop;
696
697 /* More than one ss can belong to the same loop. Hence, we add the
698 loop to the chain only if it is different from the previously
699 added one, to avoid duplicate nested loops. */
700 if (nested_loop != loop->nested)
701 {
702 gcc_assert (nested_loop->parent == NULL);
703 nested_loop->parent = loop;
704
705 gcc_assert (nested_loop->next == NULL);
706 nested_loop->next = loop->nested;
707 loop->nested = nested_loop;
708 }
709 else
710 gcc_assert (nested_loop->parent == loop);
711 }
712
713 if (ss->next == gfc_ss_terminator)
714 ss->loop_chain = loop->ss;
715 else
716 ss->loop_chain = ss->next;
717 }
718 gcc_assert (ss == gfc_ss_terminator);
719 loop->ss = head;
720 }
721
722
723 /* Generate an initializer for a static pointer or allocatable array. */
724
725 void
726 gfc_trans_static_array_pointer (gfc_symbol * sym)
727 {
728 tree type;
729
730 gcc_assert (TREE_STATIC (sym->backend_decl));
731 /* Just zero the data member. */
732 type = TREE_TYPE (sym->backend_decl);
733 DECL_INITIAL (sym->backend_decl) = gfc_build_null_descriptor (type);
734 }
735
736
737 /* If the bounds of SE's loop have not yet been set, see if they can be
738 determined from array spec AS, which is the array spec of a called
739 function. MAPPING maps the callee's dummy arguments to the values
740 that the caller is passing. Add any initialization and finalization
741 code to SE. */
742
743 void
744 gfc_set_loop_bounds_from_array_spec (gfc_interface_mapping * mapping,
745 gfc_se * se, gfc_array_spec * as)
746 {
747 int n, dim, total_dim;
748 gfc_se tmpse;
749 gfc_ss *ss;
750 tree lower;
751 tree upper;
752 tree tmp;
753
754 total_dim = 0;
755
756 if (!as || as->type != AS_EXPLICIT)
757 return;
758
759 for (ss = se->ss; ss; ss = ss->parent)
760 {
761 total_dim += ss->loop->dimen;
762 for (n = 0; n < ss->loop->dimen; n++)
763 {
764 /* The bound is known, nothing to do. */
765 if (ss->loop->to[n] != NULL_TREE)
766 continue;
767
768 dim = ss->dim[n];
769 gcc_assert (dim < as->rank);
770 gcc_assert (ss->loop->dimen <= as->rank);
771
772 /* Evaluate the lower bound. */
773 gfc_init_se (&tmpse, NULL);
774 gfc_apply_interface_mapping (mapping, &tmpse, as->lower[dim]);
775 gfc_add_block_to_block (&se->pre, &tmpse.pre);
776 gfc_add_block_to_block (&se->post, &tmpse.post);
777 lower = fold_convert (gfc_array_index_type, tmpse.expr);
778
779 /* ...and the upper bound. */
780 gfc_init_se (&tmpse, NULL);
781 gfc_apply_interface_mapping (mapping, &tmpse, as->upper[dim]);
782 gfc_add_block_to_block (&se->pre, &tmpse.pre);
783 gfc_add_block_to_block (&se->post, &tmpse.post);
784 upper = fold_convert (gfc_array_index_type, tmpse.expr);
785
786 /* Set the upper bound of the loop to UPPER - LOWER. */
787 tmp = fold_build2_loc (input_location, MINUS_EXPR,
788 gfc_array_index_type, upper, lower);
789 tmp = gfc_evaluate_now (tmp, &se->pre);
790 ss->loop->to[n] = tmp;
791 }
792 }
793
794 gcc_assert (total_dim == as->rank);
795 }
796
797
798 /* Generate code to allocate an array temporary, or create a variable to
799 hold the data. If size is NULL, zero the descriptor so that the
800 callee will allocate the array. If DEALLOC is true, also generate code to
801 free the array afterwards.
802
803 If INITIAL is not NULL, it is packed using internal_pack and the result used
804 as data instead of allocating a fresh, uninitialized area of memory.
805
806 Initialization code is added to PRE and finalization code to POST.
807 DYNAMIC is true if the caller may want to extend the array later
808 using realloc. This prevents us from putting the array on the stack. */
809
810 static void
811 gfc_trans_allocate_array_storage (stmtblock_t * pre, stmtblock_t * post,
812 gfc_array_info * info, tree size, tree nelem,
813 tree initial, bool dynamic, bool dealloc)
814 {
815 tree tmp;
816 tree desc;
817 bool onstack;
818
819 desc = info->descriptor;
820 info->offset = gfc_index_zero_node;
821 if (size == NULL_TREE || integer_zerop (size))
822 {
823 /* A callee allocated array. */
824 gfc_conv_descriptor_data_set (pre, desc, null_pointer_node);
825 onstack = FALSE;
826 }
827 else
828 {
829 /* Allocate the temporary. */
830 onstack = !dynamic && initial == NULL_TREE
831 && (flag_stack_arrays
832 || gfc_can_put_var_on_stack (size));
833
834 if (onstack)
835 {
836 /* Make a temporary variable to hold the data. */
837 tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (nelem),
838 nelem, gfc_index_one_node);
839 tmp = gfc_evaluate_now (tmp, pre);
840 tmp = build_range_type (gfc_array_index_type, gfc_index_zero_node,
841 tmp);
842 tmp = build_array_type (gfc_get_element_type (TREE_TYPE (desc)),
843 tmp);
844 tmp = gfc_create_var (tmp, "A");
845 /* If we're here only because of -fstack-arrays we have to
846 emit a DECL_EXPR to make the gimplifier emit alloca calls. */
847 if (!gfc_can_put_var_on_stack (size))
848 gfc_add_expr_to_block (pre,
849 fold_build1_loc (input_location,
850 DECL_EXPR, TREE_TYPE (tmp),
851 tmp));
852 tmp = gfc_build_addr_expr (NULL_TREE, tmp);
853 gfc_conv_descriptor_data_set (pre, desc, tmp);
854 }
855 else
856 {
857 /* Allocate memory to hold the data or call internal_pack. */
858 if (initial == NULL_TREE)
859 {
860 tmp = gfc_call_malloc (pre, NULL, size);
861 tmp = gfc_evaluate_now (tmp, pre);
862 }
863 else
864 {
865 tree packed;
866 tree source_data;
867 tree was_packed;
868 stmtblock_t do_copying;
869
870 tmp = TREE_TYPE (initial); /* Pointer to descriptor. */
871 gcc_assert (TREE_CODE (tmp) == POINTER_TYPE);
872 tmp = TREE_TYPE (tmp); /* The descriptor itself. */
873 tmp = gfc_get_element_type (tmp);
874 gcc_assert (tmp == gfc_get_element_type (TREE_TYPE (desc)));
875 packed = gfc_create_var (build_pointer_type (tmp), "data");
876
877 tmp = build_call_expr_loc (input_location,
878 gfor_fndecl_in_pack, 1, initial);
879 tmp = fold_convert (TREE_TYPE (packed), tmp);
880 gfc_add_modify (pre, packed, tmp);
881
882 tmp = build_fold_indirect_ref_loc (input_location,
883 initial);
884 source_data = gfc_conv_descriptor_data_get (tmp);
885
886 /* internal_pack may return source->data without any allocation
887 or copying if it is already packed. If that's the case, we
888 need to allocate and copy manually. */
889
890 gfc_start_block (&do_copying);
891 tmp = gfc_call_malloc (&do_copying, NULL, size);
892 tmp = fold_convert (TREE_TYPE (packed), tmp);
893 gfc_add_modify (&do_copying, packed, tmp);
894 tmp = gfc_build_memcpy_call (packed, source_data, size);
895 gfc_add_expr_to_block (&do_copying, tmp);
896
897 was_packed = fold_build2_loc (input_location, EQ_EXPR,
898 boolean_type_node, packed,
899 source_data);
900 tmp = gfc_finish_block (&do_copying);
901 tmp = build3_v (COND_EXPR, was_packed, tmp,
902 build_empty_stmt (input_location));
903 gfc_add_expr_to_block (pre, tmp);
904
905 tmp = fold_convert (pvoid_type_node, packed);
906 }
907
908 gfc_conv_descriptor_data_set (pre, desc, tmp);
909 }
910 }
911 info->data = gfc_conv_descriptor_data_get (desc);
912
913 /* The offset is zero because we create temporaries with a zero
914 lower bound. */
915 gfc_conv_descriptor_offset_set (pre, desc, gfc_index_zero_node);
916
917 if (dealloc && !onstack)
918 {
919 /* Free the temporary. */
920 tmp = gfc_conv_descriptor_data_get (desc);
921 tmp = gfc_call_free (tmp);
922 gfc_add_expr_to_block (post, tmp);
923 }
924 }
925
926
927 /* Get the scalarizer array dimension corresponding to actual array dimension
928 given by ARRAY_DIM.
929
930 For example, if SS represents the array ref a(1,:,:,1), it is a
931 bidimensional scalarizer array, and the result would be 0 for ARRAY_DIM=1,
932 and 1 for ARRAY_DIM=2.
933 If SS represents transpose(a(:,1,1,:)), it is again a bidimensional
934 scalarizer array, and the result would be 1 for ARRAY_DIM=0 and 0 for
935 ARRAY_DIM=3.
936 If SS represents sum(a(:,:,:,1), dim=1), it is a 2+1-dimensional scalarizer
937 array. If called on the inner ss, the result would be respectively 0,1,2 for
938 ARRAY_DIM=0,1,2. If called on the outer ss, the result would be 0,1
939 for ARRAY_DIM=1,2. */
940
941 static int
942 get_scalarizer_dim_for_array_dim (gfc_ss *ss, int array_dim)
943 {
944 int array_ref_dim;
945 int n;
946
947 array_ref_dim = 0;
948
949 for (; ss; ss = ss->parent)
950 for (n = 0; n < ss->dimen; n++)
951 if (ss->dim[n] < array_dim)
952 array_ref_dim++;
953
954 return array_ref_dim;
955 }
956
957
958 static gfc_ss *
959 innermost_ss (gfc_ss *ss)
960 {
961 while (ss->nested_ss != NULL)
962 ss = ss->nested_ss;
963
964 return ss;
965 }
966
967
968
969 /* Get the array reference dimension corresponding to the given loop dimension.
970 It is different from the true array dimension given by the dim array in
971 the case of a partial array reference (e.g. a(:,:,1,:)).
972 It is different from the loop dimension in the case of a transposed array.
973 */
974
975 static int
976 get_array_ref_dim_for_loop_dim (gfc_ss *ss, int loop_dim)
977 {
978 return get_scalarizer_dim_for_array_dim (innermost_ss (ss),
979 ss->dim[loop_dim]);
980 }
981
982
983 /* Generate code to create and initialize the descriptor for a temporary
984 array. This is used for both temporaries needed by the scalarizer, and
985 functions returning arrays. Adjusts the loop variables to be
986 zero-based, and calculates the loop bounds for callee allocated arrays.
987 Allocate the array unless it's callee allocated (we have a callee
988 allocated array if 'callee_alloc' is true, or if loop->to[n] is
989 NULL_TREE for any n). Also fills in the descriptor, data and offset
990 fields of info if known. Returns the size of the array, or NULL for a
991 callee allocated array.
992
993 'eltype' == NULL signals that the temporary should be a class object.
994 The 'initial' expression is used to obtain the size of the dynamic
995 type; otherwise the allocation and initialization proceed as for any
996 other expression.
997
998 PRE, POST, INITIAL, DYNAMIC and DEALLOC are as for
999 gfc_trans_allocate_array_storage. */
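/* A minimal usage sketch, with argument names following the prototype
   below and SS being the temporary's gfc_ss (the loop bounds for SS are
   assumed to have been determined already):

       size = gfc_trans_create_temp_array (&loop->pre, &loop->post, ss,
                                           eltype, NULL_TREE,
                                           false, true, false, &expr->where);

   A NULL_TREE result indicates a callee allocated array.  */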
1000
1001 tree
1002 gfc_trans_create_temp_array (stmtblock_t * pre, stmtblock_t * post, gfc_ss * ss,
1003 tree eltype, tree initial, bool dynamic,
1004 bool dealloc, bool callee_alloc, locus * where)
1005 {
1006 gfc_loopinfo *loop;
1007 gfc_ss *s;
1008 gfc_array_info *info;
1009 tree from[GFC_MAX_DIMENSIONS], to[GFC_MAX_DIMENSIONS];
1010 tree type;
1011 tree desc;
1012 tree tmp;
1013 tree size;
1014 tree nelem;
1015 tree cond;
1016 tree or_expr;
1017 tree class_expr = NULL_TREE;
1018 int n, dim, tmp_dim;
1019 int total_dim = 0;
1020
1021 /* This signals a class array for which we need the size of the
1022 dynamic type. Generate an eltype and then the class expression. */
1023 if (eltype == NULL_TREE && initial)
1024 {
1025 gcc_assert (POINTER_TYPE_P (TREE_TYPE (initial)));
1026 class_expr = build_fold_indirect_ref_loc (input_location, initial);
1027 eltype = TREE_TYPE (class_expr);
1028 eltype = gfc_get_element_type (eltype);
1029 /* Obtain the structure (class) expression. */
1030 class_expr = TREE_OPERAND (class_expr, 0);
1031 gcc_assert (class_expr);
1032 }
1033
1034 memset (from, 0, sizeof (from));
1035 memset (to, 0, sizeof (to));
1036
1037 info = &ss->info->data.array;
1038
1039 gcc_assert (ss->dimen > 0);
1040 gcc_assert (ss->loop->dimen == ss->dimen);
1041
1042 if (warn_array_temporaries && where)
1043 gfc_warning (OPT_Warray_temporaries,
1044 "Creating array temporary at %L", where);
1045
1046 /* Set the lower bound to zero. */
1047 for (s = ss; s; s = s->parent)
1048 {
1049 loop = s->loop;
1050
1051 total_dim += loop->dimen;
1052 for (n = 0; n < loop->dimen; n++)
1053 {
1054 dim = s->dim[n];
1055
1056 /* Callee allocated arrays may not have a known bound yet. */
1057 if (loop->to[n])
1058 loop->to[n] = gfc_evaluate_now (
1059 fold_build2_loc (input_location, MINUS_EXPR,
1060 gfc_array_index_type,
1061 loop->to[n], loop->from[n]),
1062 pre);
1063 loop->from[n] = gfc_index_zero_node;
1064
1065 /* We have just changed the loop bounds, so we must clear the
1066 corresponding specloop so that the delta calculation is not skipped
1067 later in gfc_set_delta. */
1068 loop->specloop[n] = NULL;
1069
1070 /* We are constructing the temporary's descriptor based on the loop
1071 dimensions. As the dimensions may be accessed in arbitrary order
1072 (think of transpose) the size taken from the n'th loop may not map
1073 to the n'th dimension of the array. We need to reconstruct loop
1074 infos in the right order before using it to set the descriptor
1075 bounds. */
1076 tmp_dim = get_scalarizer_dim_for_array_dim (ss, dim);
1077 from[tmp_dim] = loop->from[n];
1078 to[tmp_dim] = loop->to[n];
1079
1080 info->delta[dim] = gfc_index_zero_node;
1081 info->start[dim] = gfc_index_zero_node;
1082 info->end[dim] = gfc_index_zero_node;
1083 info->stride[dim] = gfc_index_one_node;
1084 }
1085 }
1086
1087 /* Initialize the descriptor. */
1088 type =
1089 gfc_get_array_type_bounds (eltype, total_dim, 0, from, to, 1,
1090 GFC_ARRAY_UNKNOWN, true);
1091 desc = gfc_create_var (type, "atmp");
1092 GFC_DECL_PACKED_ARRAY (desc) = 1;
1093
1094 info->descriptor = desc;
1095 size = gfc_index_one_node;
1096
1097 /* Fill in the array dtype. */
1098 tmp = gfc_conv_descriptor_dtype (desc);
1099 gfc_add_modify (pre, tmp, gfc_get_dtype (TREE_TYPE (desc)));
1100
1101 /*
1102 Fill in the bounds and stride. This is a packed array, so:
1103
1104 size = 1;
1105 for (n = 0; n < rank; n++)
1106 {
1107 stride[n] = size;
1108 delta = ubound[n] + 1 - lbound[n];
1109 size = size * delta;
1110 }
1111 size = size * sizeof(element);
1112 */
1113
1114 or_expr = NULL_TREE;
1115
1116 /* If there is at least one null loop->to[n], it is a callee allocated
1117 array. */
1118 for (n = 0; n < total_dim; n++)
1119 if (to[n] == NULL_TREE)
1120 {
1121 size = NULL_TREE;
1122 break;
1123 }
1124
1125 if (size == NULL_TREE)
1126 for (s = ss; s; s = s->parent)
1127 for (n = 0; n < s->loop->dimen; n++)
1128 {
1129 dim = get_scalarizer_dim_for_array_dim (ss, s->dim[n]);
1130
1131 /* For a callee allocated array express the loop bounds in terms
1132 of the descriptor fields. */
1133 tmp = fold_build2_loc (input_location,
1134 MINUS_EXPR, gfc_array_index_type,
1135 gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]),
1136 gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]));
1137 s->loop->to[n] = tmp;
1138 }
1139 else
1140 {
1141 for (n = 0; n < total_dim; n++)
1142 {
1143 /* Store the stride and bound components in the descriptor. */
1144 gfc_conv_descriptor_stride_set (pre, desc, gfc_rank_cst[n], size);
1145
1146 gfc_conv_descriptor_lbound_set (pre, desc, gfc_rank_cst[n],
1147 gfc_index_zero_node);
1148
1149 gfc_conv_descriptor_ubound_set (pre, desc, gfc_rank_cst[n], to[n]);
1150
1151 tmp = fold_build2_loc (input_location, PLUS_EXPR,
1152 gfc_array_index_type,
1153 to[n], gfc_index_one_node);
1154
1155 /* Check whether the size for this dimension is negative. */
1156 cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
1157 tmp, gfc_index_zero_node);
1158 cond = gfc_evaluate_now (cond, pre);
1159
1160 if (n == 0)
1161 or_expr = cond;
1162 else
1163 or_expr = fold_build2_loc (input_location, TRUTH_OR_EXPR,
1164 boolean_type_node, or_expr, cond);
1165
1166 size = fold_build2_loc (input_location, MULT_EXPR,
1167 gfc_array_index_type, size, tmp);
1168 size = gfc_evaluate_now (size, pre);
1169 }
1170 }
1171
1172 /* Get the size of the array. */
1173 if (size && !callee_alloc)
1174 {
1175 tree elemsize;
1176 /* If or_expr is true, then the extent in at least one
1177 dimension is zero and the size is set to zero. */
1178 size = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type,
1179 or_expr, gfc_index_zero_node, size);
1180
1181 nelem = size;
1182 if (class_expr == NULL_TREE)
1183 elemsize = fold_convert (gfc_array_index_type,
1184 TYPE_SIZE_UNIT (gfc_get_element_type (type)));
1185 else
1186 elemsize = gfc_class_vtab_size_get (class_expr);
1187
1188 size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
1189 size, elemsize);
1190 }
1191 else
1192 {
1193 nelem = size;
1194 size = NULL_TREE;
1195 }
1196
1197 gfc_trans_allocate_array_storage (pre, post, info, size, nelem, initial,
1198 dynamic, dealloc);
1199
1200 while (ss->parent)
1201 ss = ss->parent;
1202
1203 if (ss->dimen > ss->loop->temp_dim)
1204 ss->loop->temp_dim = ss->dimen;
1205
1206 return size;
1207 }
1208
1209
1210 /* Return the number of iterations in a loop that starts at START,
1211 ends at END, and has step STEP. */
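/* Computed as MAX (FLOOR ((END - START) / STEP) + 1, 0); e.g. START = 1,
   END = 10, STEP = 3 gives 4 iterations.  */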
1212
1213 static tree
1214 gfc_get_iteration_count (tree start, tree end, tree step)
1215 {
1216 tree tmp;
1217 tree type;
1218
1219 type = TREE_TYPE (step);
1220 tmp = fold_build2_loc (input_location, MINUS_EXPR, type, end, start);
1221 tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR, type, tmp, step);
1222 tmp = fold_build2_loc (input_location, PLUS_EXPR, type, tmp,
1223 build_int_cst (type, 1));
1224 tmp = fold_build2_loc (input_location, MAX_EXPR, type, tmp,
1225 build_int_cst (type, 0));
1226 return fold_convert (gfc_array_index_type, tmp);
1227 }
1228
1229
1230 /* Extend the data in array DESC by EXTRA elements. */
1231
1232 static void
1233 gfc_grow_array (stmtblock_t * pblock, tree desc, tree extra)
1234 {
1235 tree arg0, arg1;
1236 tree tmp;
1237 tree size;
1238 tree ubound;
1239
1240 if (integer_zerop (extra))
1241 return;
1242
1243 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[0]);
1244
1245 /* Add EXTRA to the upper bound. */
1246 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
1247 ubound, extra);
1248 gfc_conv_descriptor_ubound_set (pblock, desc, gfc_rank_cst[0], tmp);
1249
1250 /* Get the value of the current data pointer. */
1251 arg0 = gfc_conv_descriptor_data_get (desc);
1252
1253 /* Calculate the new array size. */
1254 size = TYPE_SIZE_UNIT (gfc_get_element_type (TREE_TYPE (desc)));
1255 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
1256 ubound, gfc_index_one_node);
1257 arg1 = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
1258 fold_convert (size_type_node, tmp),
1259 fold_convert (size_type_node, size));
1260
1261 /* Call the realloc() function. */
1262 tmp = gfc_call_realloc (pblock, arg0, arg1);
1263 gfc_conv_descriptor_data_set (pblock, desc, tmp);
1264 }
1265
1266
1267 /* Return true if the bounds of iterator I can only be determined
1268 at run time. */
1269
1270 static inline bool
1271 gfc_iterator_has_dynamic_bounds (gfc_iterator * i)
1272 {
1273 return (i->start->expr_type != EXPR_CONSTANT
1274 || i->end->expr_type != EXPR_CONSTANT
1275 || i->step->expr_type != EXPR_CONSTANT);
1276 }
1277
1278
1279 /* Split the size of constructor element EXPR into the sum of two terms,
1280 one of which can be determined at compile time and one of which must
1281 be calculated at run time. Set *SIZE to the former and return true
1282 if the latter might be nonzero. */
1283
1284 static bool
1285 gfc_get_array_constructor_element_size (mpz_t * size, gfc_expr * expr)
1286 {
1287 if (expr->expr_type == EXPR_ARRAY)
1288 return gfc_get_array_constructor_size (size, expr->value.constructor);
1289 else if (expr->rank > 0)
1290 {
1291 /* Calculate everything at run time. */
1292 mpz_set_ui (*size, 0);
1293 return true;
1294 }
1295 else
1296 {
1297 /* A single element. */
1298 mpz_set_ui (*size, 1);
1299 return false;
1300 }
1301 }
1302
1303
1304 /* Like gfc_get_array_constructor_element_size, but applied to the whole
1305 of array constructor C. */
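/* For instance, for the constructor (/ 1, 2, (0, i=1,n) /) the static part
   stored in *SIZE is 2, and the function returns true because the extent of
   the implied-do depends on the run-time value of n.  */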
1306
1307 static bool
1308 gfc_get_array_constructor_size (mpz_t * size, gfc_constructor_base base)
1309 {
1310 gfc_constructor *c;
1311 gfc_iterator *i;
1312 mpz_t val;
1313 mpz_t len;
1314 bool dynamic;
1315
1316 mpz_set_ui (*size, 0);
1317 mpz_init (len);
1318 mpz_init (val);
1319
1320 dynamic = false;
1321 for (c = gfc_constructor_first (base); c; c = gfc_constructor_next (c))
1322 {
1323 i = c->iterator;
1324 if (i && gfc_iterator_has_dynamic_bounds (i))
1325 dynamic = true;
1326 else
1327 {
1328 dynamic |= gfc_get_array_constructor_element_size (&len, c->expr);
1329 if (i)
1330 {
1331 /* Multiply the static part of the element size by the
1332 number of iterations. */
1333 mpz_sub (val, i->end->value.integer, i->start->value.integer);
1334 mpz_fdiv_q (val, val, i->step->value.integer);
1335 mpz_add_ui (val, val, 1);
1336 if (mpz_sgn (val) > 0)
1337 mpz_mul (len, len, val);
1338 else
1339 mpz_set_ui (len, 0);
1340 }
1341 mpz_add (*size, *size, len);
1342 }
1343 }
1344 mpz_clear (len);
1345 mpz_clear (val);
1346 return dynamic;
1347 }
1348
1349
1350 /* Make sure offset is a variable. */
1351
1352 static void
1353 gfc_put_offset_into_var (stmtblock_t * pblock, tree * poffset,
1354 tree * offsetvar)
1355 {
1356 /* We should have already created the offset variable. We cannot
1357 create it here because we may be in an inner scope. */
1358 gcc_assert (*offsetvar != NULL_TREE);
1359 gfc_add_modify (pblock, *offsetvar, *poffset);
1360 *poffset = *offsetvar;
1361 TREE_USED (*offsetvar) = 1;
1362 }
1363
1364
1365 /* Variables needed for bounds-checking. */
1366 static bool first_len;
1367 static tree first_len_val;
1368 static bool typespec_chararray_ctor;
1369
1370 static void
1371 gfc_trans_array_ctor_element (stmtblock_t * pblock, tree desc,
1372 tree offset, gfc_se * se, gfc_expr * expr)
1373 {
1374 tree tmp;
1375
1376 gfc_conv_expr (se, expr);
1377
1378 /* Store the value. */
1379 tmp = build_fold_indirect_ref_loc (input_location,
1380 gfc_conv_descriptor_data_get (desc));
1381 tmp = gfc_build_array_ref (tmp, offset, NULL);
1382
1383 if (expr->ts.type == BT_CHARACTER)
1384 {
1385 int i = gfc_validate_kind (BT_CHARACTER, expr->ts.kind, false);
1386 tree esize;
1387
1388 esize = size_in_bytes (gfc_get_element_type (TREE_TYPE (desc)));
1389 esize = fold_convert (gfc_charlen_type_node, esize);
1390 esize = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
1391 gfc_charlen_type_node, esize,
1392 build_int_cst (gfc_charlen_type_node,
1393 gfc_character_kinds[i].bit_size / 8));
1394
1395 gfc_conv_string_parameter (se);
1396 if (POINTER_TYPE_P (TREE_TYPE (tmp)))
1397 {
1398 /* The temporary is an array of pointers. */
1399 se->expr = fold_convert (TREE_TYPE (tmp), se->expr);
1400 gfc_add_modify (&se->pre, tmp, se->expr);
1401 }
1402 else
1403 {
1404 /* The temporary is an array of string values. */
1405 tmp = gfc_build_addr_expr (gfc_get_pchar_type (expr->ts.kind), tmp);
1406 /* We know the temporary and the value will be the same length,
1407 so we can use memcpy. */
1408 gfc_trans_string_copy (&se->pre, esize, tmp, expr->ts.kind,
1409 se->string_length, se->expr, expr->ts.kind);
1410 }
1411 if ((gfc_option.rtcheck & GFC_RTCHECK_BOUNDS) && !typespec_chararray_ctor)
1412 {
1413 if (first_len)
1414 {
1415 gfc_add_modify (&se->pre, first_len_val,
1416 se->string_length);
1417 first_len = false;
1418 }
1419 else
1420 {
1421 /* Verify that all constructor elements are of the same
1422 length. */
1423 tree cond = fold_build2_loc (input_location, NE_EXPR,
1424 boolean_type_node, first_len_val,
1425 se->string_length);
1426 gfc_trans_runtime_check
1427 (true, false, cond, &se->pre, &expr->where,
1428 "Different CHARACTER lengths (%ld/%ld) in array constructor",
1429 fold_convert (long_integer_type_node, first_len_val),
1430 fold_convert (long_integer_type_node, se->string_length));
1431 }
1432 }
1433 }
1434 else
1435 {
1436 /* TODO: Should the frontend already have done this conversion? */
1437 se->expr = fold_convert (TREE_TYPE (tmp), se->expr);
1438 gfc_add_modify (&se->pre, tmp, se->expr);
1439 }
1440
1441 gfc_add_block_to_block (pblock, &se->pre);
1442 gfc_add_block_to_block (pblock, &se->post);
1443 }
1444
1445
1446 /* Add the contents of an array to the constructor. DYNAMIC is as for
1447 gfc_trans_array_constructor_value. */
1448
1449 static void
1450 gfc_trans_array_constructor_subarray (stmtblock_t * pblock,
1451 tree type ATTRIBUTE_UNUSED,
1452 tree desc, gfc_expr * expr,
1453 tree * poffset, tree * offsetvar,
1454 bool dynamic)
1455 {
1456 gfc_se se;
1457 gfc_ss *ss;
1458 gfc_loopinfo loop;
1459 stmtblock_t body;
1460 tree tmp;
1461 tree size;
1462 int n;
1463
1464 /* We need this to be a variable so we can increment it. */
1465 gfc_put_offset_into_var (pblock, poffset, offsetvar);
1466
1467 gfc_init_se (&se, NULL);
1468
1469 /* Walk the array expression. */
1470 ss = gfc_walk_expr (expr);
1471 gcc_assert (ss != gfc_ss_terminator);
1472
1473 /* Initialize the scalarizer. */
1474 gfc_init_loopinfo (&loop);
1475 gfc_add_ss_to_loop (&loop, ss);
1476
1477 /* Initialize the loop. */
1478 gfc_conv_ss_startstride (&loop);
1479 gfc_conv_loop_setup (&loop, &expr->where);
1480
1481 /* Make sure the constructed array has room for the new data. */
1482 if (dynamic)
1483 {
1484 /* Set SIZE to the total number of elements in the subarray. */
1485 size = gfc_index_one_node;
1486 for (n = 0; n < loop.dimen; n++)
1487 {
1488 tmp = gfc_get_iteration_count (loop.from[n], loop.to[n],
1489 gfc_index_one_node);
1490 size = fold_build2_loc (input_location, MULT_EXPR,
1491 gfc_array_index_type, size, tmp);
1492 }
1493
1494 /* Grow the constructed array by SIZE elements. */
1495 gfc_grow_array (&loop.pre, desc, size);
1496 }
1497
1498 /* Make the loop body. */
1499 gfc_mark_ss_chain_used (ss, 1);
1500 gfc_start_scalarized_body (&loop, &body);
1501 gfc_copy_loopinfo_to_se (&se, &loop);
1502 se.ss = ss;
1503
1504 gfc_trans_array_ctor_element (&body, desc, *poffset, &se, expr);
1505 gcc_assert (se.ss == gfc_ss_terminator);
1506
1507 /* Increment the offset. */
1508 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
1509 *poffset, gfc_index_one_node);
1510 gfc_add_modify (&body, *poffset, tmp);
1511
1512 /* Finish the loop. */
1513 gfc_trans_scalarizing_loops (&loop, &body);
1514 gfc_add_block_to_block (&loop.pre, &loop.post);
1515 tmp = gfc_finish_block (&loop.pre);
1516 gfc_add_expr_to_block (pblock, tmp);
1517
1518 gfc_cleanup_loop (&loop);
1519 }
1520
1521
1522 /* Assign the values to the elements of an array constructor. DYNAMIC
1523 is true if descriptor DESC only contains enough data for the static
1524 size calculated by gfc_get_array_constructor_size. When true, memory
1525 for the dynamic parts must be allocated using realloc. */
1526
1527 static void
1528 gfc_trans_array_constructor_value (stmtblock_t * pblock, tree type,
1529 tree desc, gfc_constructor_base base,
1530 tree * poffset, tree * offsetvar,
1531 bool dynamic)
1532 {
1533 tree tmp;
1534 tree start = NULL_TREE;
1535 tree end = NULL_TREE;
1536 tree step = NULL_TREE;
1537 stmtblock_t body;
1538 gfc_se se;
1539 mpz_t size;
1540 gfc_constructor *c;
1541
1542 tree shadow_loopvar = NULL_TREE;
1543 gfc_saved_var saved_loopvar;
1544
1545 mpz_init (size);
1546 for (c = gfc_constructor_first (base); c; c = gfc_constructor_next (c))
1547 {
1548 /* If this is an iterator or an array, the offset must be a variable. */
1549 if ((c->iterator || c->expr->rank > 0) && INTEGER_CST_P (*poffset))
1550 gfc_put_offset_into_var (pblock, poffset, offsetvar);
1551
1552 /* Shadowing the iterator avoids changing its value and saves us from
1553 keeping track of it. Further, it makes sure that there's always a
1554 backend-decl for the symbol, even if there wasn't one before,
1555 e.g. in the case of an iterator that appears in a specification
1556 expression in an interface mapping. */
1557 if (c->iterator)
1558 {
1559 gfc_symbol *sym;
1560 tree type;
1561
1562 /* Evaluate loop bounds before substituting the loop variable
1563 in case they depend on it. Such a case is invalid, but it is
1564 not more expensive to do the right thing here.
1565 See PR 44354. */
1566 gfc_init_se (&se, NULL);
1567 gfc_conv_expr_val (&se, c->iterator->start);
1568 gfc_add_block_to_block (pblock, &se.pre);
1569 start = gfc_evaluate_now (se.expr, pblock);
1570
1571 gfc_init_se (&se, NULL);
1572 gfc_conv_expr_val (&se, c->iterator->end);
1573 gfc_add_block_to_block (pblock, &se.pre);
1574 end = gfc_evaluate_now (se.expr, pblock);
1575
1576 gfc_init_se (&se, NULL);
1577 gfc_conv_expr_val (&se, c->iterator->step);
1578 gfc_add_block_to_block (pblock, &se.pre);
1579 step = gfc_evaluate_now (se.expr, pblock);
1580
1581 sym = c->iterator->var->symtree->n.sym;
1582 type = gfc_typenode_for_spec (&sym->ts);
1583
1584 shadow_loopvar = gfc_create_var (type, "shadow_loopvar");
1585 gfc_shadow_sym (sym, shadow_loopvar, &saved_loopvar);
1586 }
1587
1588 gfc_start_block (&body);
1589
1590 if (c->expr->expr_type == EXPR_ARRAY)
1591 {
1592 /* Array constructors can be nested. */
1593 gfc_trans_array_constructor_value (&body, type, desc,
1594 c->expr->value.constructor,
1595 poffset, offsetvar, dynamic);
1596 }
1597 else if (c->expr->rank > 0)
1598 {
1599 gfc_trans_array_constructor_subarray (&body, type, desc, c->expr,
1600 poffset, offsetvar, dynamic);
1601 }
1602 else
1603 {
1604 /* This code really upsets the gimplifier so don't bother for now. */
1605 gfc_constructor *p;
1606 HOST_WIDE_INT n;
1607 HOST_WIDE_INT size;
1608
1609 p = c;
1610 n = 0;
1611 while (p && !(p->iterator || p->expr->expr_type != EXPR_CONSTANT))
1612 {
1613 p = gfc_constructor_next (p);
1614 n++;
1615 }
1616 if (n < 4)
1617 {
1618 /* Scalar values. */
1619 gfc_init_se (&se, NULL);
1620 gfc_trans_array_ctor_element (&body, desc, *poffset,
1621 &se, c->expr);
1622
1623 *poffset = fold_build2_loc (input_location, PLUS_EXPR,
1624 gfc_array_index_type,
1625 *poffset, gfc_index_one_node);
1626 }
1627 else
1628 {
1629 /* Collect multiple scalar constants into a constructor. */
1630 vec<constructor_elt, va_gc> *v = NULL;
1631 tree init;
1632 tree bound;
1633 tree tmptype;
1634 HOST_WIDE_INT idx = 0;
1635
1636 p = c;
1637 /* Count the number of consecutive scalar constants. */
1638 while (p && !(p->iterator
1639 || p->expr->expr_type != EXPR_CONSTANT))
1640 {
1641 gfc_init_se (&se, NULL);
1642 gfc_conv_constant (&se, p->expr);
1643
1644 if (c->expr->ts.type != BT_CHARACTER)
1645 se.expr = fold_convert (type, se.expr);
1646 /* For constant character array constructors we build
1647 an array of pointers. */
1648 else if (POINTER_TYPE_P (type))
1649 se.expr = gfc_build_addr_expr
1650 (gfc_get_pchar_type (p->expr->ts.kind),
1651 se.expr);
1652
1653 CONSTRUCTOR_APPEND_ELT (v,
1654 build_int_cst (gfc_array_index_type,
1655 idx++),
1656 se.expr);
1657 c = p;
1658 p = gfc_constructor_next (p);
1659 }
1660
1661 bound = size_int (n - 1);
1662 /* Create an array type to hold them. */
1663 tmptype = build_range_type (gfc_array_index_type,
1664 gfc_index_zero_node, bound);
1665 tmptype = build_array_type (type, tmptype);
1666
1667 init = build_constructor (tmptype, v);
1668 TREE_CONSTANT (init) = 1;
1669 TREE_STATIC (init) = 1;
1670 /* Create a static variable to hold the data. */
1671 tmp = gfc_create_var (tmptype, "data");
1672 TREE_STATIC (tmp) = 1;
1673 TREE_CONSTANT (tmp) = 1;
1674 TREE_READONLY (tmp) = 1;
1675 DECL_INITIAL (tmp) = init;
1676 init = tmp;
1677
1678 /* Use BUILTIN_MEMCPY to assign the values. */
1679 tmp = gfc_conv_descriptor_data_get (desc);
1680 tmp = build_fold_indirect_ref_loc (input_location,
1681 tmp);
1682 tmp = gfc_build_array_ref (tmp, *poffset, NULL);
1683 tmp = gfc_build_addr_expr (NULL_TREE, tmp);
1684 init = gfc_build_addr_expr (NULL_TREE, init);
1685
1686 size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (type));
1687 bound = build_int_cst (size_type_node, n * size);
1688 tmp = build_call_expr_loc (input_location,
1689 builtin_decl_explicit (BUILT_IN_MEMCPY),
1690 3, tmp, init, bound);
1691 gfc_add_expr_to_block (&body, tmp);
1692
1693 *poffset = fold_build2_loc (input_location, PLUS_EXPR,
1694 gfc_array_index_type, *poffset,
1695 build_int_cst (gfc_array_index_type, n));
1696 }
1697 if (!INTEGER_CST_P (*poffset))
1698 {
1699 gfc_add_modify (&body, *offsetvar, *poffset);
1700 *poffset = *offsetvar;
1701 }
1702 }
1703
1704 /* The frontend should already have done any expansions
1705 at compile-time. */
1706 if (!c->iterator)
1707 {
1708 /* Pass the code as is. */
1709 tmp = gfc_finish_block (&body);
1710 gfc_add_expr_to_block (pblock, tmp);
1711 }
1712 else
1713 {
1714 /* Build the implied do-loop. */
1715 stmtblock_t implied_do_block;
1716 tree cond;
1717 tree exit_label;
1718 tree loopbody;
1719 tree tmp2;
1720
1721 loopbody = gfc_finish_block (&body);
1722
1723 /* Create a new block that holds the implied-do loop. A temporary
1724 loop-variable is used. */
1725 gfc_start_block(&implied_do_block);
1726
1727 /* Initialize the loop. */
1728 gfc_add_modify (&implied_do_block, shadow_loopvar, start);
1729
1730 /* If this array expands dynamically, and the number of iterations
1731 is not constant, we won't have allocated space for the static
1732 part of C->EXPR's size. Do that now. */
1733 if (dynamic && gfc_iterator_has_dynamic_bounds (c->iterator))
1734 {
1735 /* Get the number of iterations. */
1736 tmp = gfc_get_iteration_count (shadow_loopvar, end, step);
1737
1738 /* Get the static part of C->EXPR's size. */
1739 gfc_get_array_constructor_element_size (&size, c->expr);
1740 tmp2 = gfc_conv_mpz_to_tree (size, gfc_index_integer_kind);
1741
1742 /* Grow the array by TMP * TMP2 elements. */
1743 tmp = fold_build2_loc (input_location, MULT_EXPR,
1744 gfc_array_index_type, tmp, tmp2);
1745 gfc_grow_array (&implied_do_block, desc, tmp);
1746 }
1747
1748 /* Generate the loop body. */
1749 exit_label = gfc_build_label_decl (NULL_TREE);
1750 gfc_start_block (&body);
1751
1752 /* Generate the exit condition. Depending on the sign of
1753 the step variable we have to generate the correct
1754 comparison. */
1755 tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
1756 step, build_int_cst (TREE_TYPE (step), 0));
1757 cond = fold_build3_loc (input_location, COND_EXPR,
1758 boolean_type_node, tmp,
1759 fold_build2_loc (input_location, GT_EXPR,
1760 boolean_type_node, shadow_loopvar, end),
1761 fold_build2_loc (input_location, LT_EXPR,
1762 boolean_type_node, shadow_loopvar, end));
1763 tmp = build1_v (GOTO_EXPR, exit_label);
1764 TREE_USED (exit_label) = 1;
1765 tmp = build3_v (COND_EXPR, cond, tmp,
1766 build_empty_stmt (input_location));
1767 gfc_add_expr_to_block (&body, tmp);
1768
1769 /* The main loop body. */
1770 gfc_add_expr_to_block (&body, loopbody);
1771
1772 /* Increase loop variable by step. */
1773 tmp = fold_build2_loc (input_location, PLUS_EXPR,
1774 TREE_TYPE (shadow_loopvar), shadow_loopvar,
1775 step);
1776 gfc_add_modify (&body, shadow_loopvar, tmp);
1777
1778 /* Finish the loop. */
1779 tmp = gfc_finish_block (&body);
1780 tmp = build1_v (LOOP_EXPR, tmp);
1781 gfc_add_expr_to_block (&implied_do_block, tmp);
1782
1783 /* Add the exit label. */
1784 tmp = build1_v (LABEL_EXPR, exit_label);
1785 gfc_add_expr_to_block (&implied_do_block, tmp);
1786
1787 /* Finish the implied-do loop. */
1788 tmp = gfc_finish_block(&implied_do_block);
1789 gfc_add_expr_to_block(pblock, tmp);
1790
1791 gfc_restore_sym (c->iterator->var->symtree->n.sym, &saved_loopvar);
1792 }
1793 }
1794 mpz_clear (size);
1795 }
1796
1797
1798 /* The array constructor code can create a string length with an operand
1799 in the form of a temporary variable. This variable will retain its
1800 context (current_function_decl). If we store this length tree in a
1801 gfc_charlen structure which is shared by a variable in another
1802 context, that shared structure would then refer to a variable from
1803 a different context, and we could trip the assertion in
1804 expand_expr_real_1 when it sees that a variable has been created in
1805 one context and referenced in another.
1806
1807 If this might be the case, we create a new gfc_charlen structure and
1808 link it into the current namespace. */
1809
1810 static void
1811 store_backend_decl (gfc_charlen **clp, tree len, bool force_new_cl)
1812 {
1813 if (force_new_cl)
1814 {
1815 gfc_charlen *new_cl = gfc_new_charlen (gfc_current_ns, *clp);
1816 *clp = new_cl;
1817 }
1818 (*clp)->backend_decl = len;
1819 }
1820
1821 /* A catch-all to obtain the string length for anything that is not
1822 a substring of non-constant length, a constant, array or variable. */
1823
1824 static void
1825 get_array_ctor_all_strlen (stmtblock_t *block, gfc_expr *e, tree *len)
1826 {
1827 gfc_se se;
1828
1829 /* Don't bother if we already know the length is a constant. */
1830 if (*len && INTEGER_CST_P (*len))
1831 return;
1832
1833 if (!e->ref && e->ts.u.cl && e->ts.u.cl->length
1834 && e->ts.u.cl->length->expr_type == EXPR_CONSTANT)
1835 {
1836 /* This is easy. */
1837 gfc_conv_const_charlen (e->ts.u.cl);
1838 *len = e->ts.u.cl->backend_decl;
1839 }
1840 else
1841 {
1842 /* Otherwise, be brutal even if inefficient. */
1843 gfc_init_se (&se, NULL);
1844
1845 /* No function call, in case of side effects. */
1846 se.no_function_call = 1;
1847 if (e->rank == 0)
1848 gfc_conv_expr (&se, e);
1849 else
1850 gfc_conv_expr_descriptor (&se, e);
1851
1852 /* Fix the value. */
1853 *len = gfc_evaluate_now (se.string_length, &se.pre);
1854
1855 gfc_add_block_to_block (block, &se.pre);
1856 gfc_add_block_to_block (block, &se.post);
1857
1858 store_backend_decl (&e->ts.u.cl, *len, true);
1859 }
1860 }
1861
1862
1863 /* Figure out the string length of a variable reference expression.
1864 Used by get_array_ctor_strlen. */
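/* For instance, for a reference such as STR(3:7) with constant substring
   bounds the code below computes 1 + 7 - 3 = 5; if either bound is not
   constant it falls back to get_array_ctor_all_strlen.  */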
1865
1866 static void
1867 get_array_ctor_var_strlen (stmtblock_t *block, gfc_expr * expr, tree * len)
1868 {
1869 gfc_ref *ref;
1870 gfc_typespec *ts;
1871 mpz_t char_len;
1872
1873 /* Don't bother if we already know the length is a constant. */
1874 if (*len && INTEGER_CST_P (*len))
1875 return;
1876
1877 ts = &expr->symtree->n.sym->ts;
1878 for (ref = expr->ref; ref; ref = ref->next)
1879 {
1880 switch (ref->type)
1881 {
1882 case REF_ARRAY:
1883 /* Array references don't change the string length. */
1884 break;
1885
1886 case REF_COMPONENT:
1887 /* Use the length of the component. */
1888 ts = &ref->u.c.component->ts;
1889 break;
1890
1891 case REF_SUBSTRING:
1892 if (ref->u.ss.start->expr_type != EXPR_CONSTANT
1893 || ref->u.ss.end->expr_type != EXPR_CONSTANT)
1894 {
1895 /* Note that this might evaluate expr. */
1896 get_array_ctor_all_strlen (block, expr, len);
1897 return;
1898 }
1899 mpz_init_set_ui (char_len, 1);
1900 mpz_add (char_len, char_len, ref->u.ss.end->value.integer);
1901 mpz_sub (char_len, char_len, ref->u.ss.start->value.integer);
1902 *len = gfc_conv_mpz_to_tree (char_len, gfc_default_integer_kind);
1903 *len = convert (gfc_charlen_type_node, *len);
1904 mpz_clear (char_len);
1905 return;
1906
1907 default:
1908 gcc_unreachable ();
1909 }
1910 }
1911
1912 *len = ts->u.cl->backend_decl;
1913 }
1914
1915
1916 /* Figure out the string length of a character array constructor.
1917 If len is NULL, don't calculate the length; this happens for recursive calls
1918 when a sub-array-constructor is an element but not at the first position,
1919 in which case we are not interested in the length.
1920 Returns TRUE if all elements are character constants. */
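/* For example, (/ 'ab', 'cd' /) consists only of character constants, so
   *LEN is set from the first element (length 2) and TRUE is returned;
   (/ 'ab', c /) with a variable C makes the result non-constant and the
   length of C is obtained via get_array_ctor_var_strlen instead.  */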
1921
1922 bool
1923 get_array_ctor_strlen (stmtblock_t *block, gfc_constructor_base base, tree * len)
1924 {
1925 gfc_constructor *c;
1926 bool is_const;
1927
1928 is_const = TRUE;
1929
1930 if (gfc_constructor_first (base) == NULL)
1931 {
1932 if (len)
1933 *len = build_int_cstu (gfc_charlen_type_node, 0);
1934 return is_const;
1935 }
1936
1937 /* Loop over all constructor elements to find out is_const, but in len we
1938 want to store the length of the first, not the last, element. We can
1939 of course exit the loop as soon as is_const is found to be false. */
1940 for (c = gfc_constructor_first (base);
1941 c && is_const; c = gfc_constructor_next (c))
1942 {
1943 switch (c->expr->expr_type)
1944 {
1945 case EXPR_CONSTANT:
1946 if (len && !(*len && INTEGER_CST_P (*len)))
1947 *len = build_int_cstu (gfc_charlen_type_node,
1948 c->expr->value.character.length);
1949 break;
1950
1951 case EXPR_ARRAY:
1952 if (!get_array_ctor_strlen (block, c->expr->value.constructor, len))
1953 is_const = false;
1954 break;
1955
1956 case EXPR_VARIABLE:
1957 is_const = false;
1958 if (len)
1959 get_array_ctor_var_strlen (block, c->expr, len);
1960 break;
1961
1962 default:
1963 is_const = false;
1964 if (len)
1965 get_array_ctor_all_strlen (block, c->expr, len);
1966 break;
1967 }
1968
1969 /* After the first iteration, we don't want the length modified. */
1970 len = NULL;
1971 }
1972
1973 return is_const;
1974 }
1975
1976 /* Check whether the array constructor BASE consists entirely of constant
1977 elements; if so, return the number of those elements, otherwise
1978 return zero. Note that an empty or NULL array constructor returns zero. */
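/* For example, (/ 1, 2, 3 /) yields 3, whereas (/ 1, n /) with a
   variable N, (/ (i, i = 1, 3) /) with an iterator, or an empty
   constructor all yield zero.  */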
1979
1980 unsigned HOST_WIDE_INT
1981 gfc_constant_array_constructor_p (gfc_constructor_base base)
1982 {
1983 unsigned HOST_WIDE_INT nelem = 0;
1984
1985 gfc_constructor *c = gfc_constructor_first (base);
1986 while (c)
1987 {
1988 if (c->iterator
1989 || c->expr->rank > 0
1990 || c->expr->expr_type != EXPR_CONSTANT)
1991 return 0;
1992 c = gfc_constructor_next (c);
1993 nelem++;
1994 }
1995 return nelem;
1996 }
1997
1998
1999 /* Given EXPR, the constant array constructor specified by an EXPR_ARRAY,
2000 and the tree type of its elements, TYPE, return a static constant
2001 variable that is compile-time initialized. */
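/* As a sketch of the result: for (/ 1, 2, 3 /) this builds a static,
   read-only, artificial VAR_DECL whose DECL_INITIAL is the constructor
   {1, 2, 3} and whose type is a descriptorless (GFC_ARRAY_TYPE_P) array
   type with bounds 0:2, ready for direct use by the scalarizer.  */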
2002
2003 tree
2004 gfc_build_constant_array_constructor (gfc_expr * expr, tree type)
2005 {
2006 tree tmptype, init, tmp;
2007 HOST_WIDE_INT nelem;
2008 gfc_constructor *c;
2009 gfc_array_spec as;
2010 gfc_se se;
2011 int i;
2012 vec<constructor_elt, va_gc> *v = NULL;
2013
2014 /* First traverse the constructor list, converting the constants
2015 to tree to build an initializer. */
2016 nelem = 0;
2017 c = gfc_constructor_first (expr->value.constructor);
2018 while (c)
2019 {
2020 gfc_init_se (&se, NULL);
2021 gfc_conv_constant (&se, c->expr);
2022 if (c->expr->ts.type != BT_CHARACTER)
2023 se.expr = fold_convert (type, se.expr);
2024 else if (POINTER_TYPE_P (type))
2025 se.expr = gfc_build_addr_expr (gfc_get_pchar_type (c->expr->ts.kind),
2026 se.expr);
2027 CONSTRUCTOR_APPEND_ELT (v, build_int_cst (gfc_array_index_type, nelem),
2028 se.expr);
2029 c = gfc_constructor_next (c);
2030 nelem++;
2031 }
2032
2033 /* Next determine the tree type for the array. We use the gfortran
2034 front-end's gfc_get_nodesc_array_type in order to create a suitable
2035 GFC_ARRAY_TYPE_P that may be used by the scalarizer. */
2036
2037 memset (&as, 0, sizeof (gfc_array_spec));
2038
2039 as.rank = expr->rank;
2040 as.type = AS_EXPLICIT;
2041 if (!expr->shape)
2042 {
2043 as.lower[0] = gfc_get_int_expr (gfc_default_integer_kind, NULL, 0);
2044 as.upper[0] = gfc_get_int_expr (gfc_default_integer_kind,
2045 NULL, nelem - 1);
2046 }
2047 else
2048 for (i = 0; i < expr->rank; i++)
2049 {
2050 int tmp = (int) mpz_get_si (expr->shape[i]);
2051 as.lower[i] = gfc_get_int_expr (gfc_default_integer_kind, NULL, 0);
2052 as.upper[i] = gfc_get_int_expr (gfc_default_integer_kind,
2053 NULL, tmp - 1);
2054 }
2055
2056 tmptype = gfc_get_nodesc_array_type (type, &as, PACKED_STATIC, true);
2057
2058 /* as is not needed anymore. */
2059 for (i = 0; i < as.rank + as.corank; i++)
2060 {
2061 gfc_free_expr (as.lower[i]);
2062 gfc_free_expr (as.upper[i]);
2063 }
2064
2065 init = build_constructor (tmptype, v);
2066
2067 TREE_CONSTANT (init) = 1;
2068 TREE_STATIC (init) = 1;
2069
2070 tmp = build_decl (input_location, VAR_DECL, create_tmp_var_name ("A"),
2071 tmptype);
2072 DECL_ARTIFICIAL (tmp) = 1;
2073 DECL_IGNORED_P (tmp) = 1;
2074 TREE_STATIC (tmp) = 1;
2075 TREE_CONSTANT (tmp) = 1;
2076 TREE_READONLY (tmp) = 1;
2077 DECL_INITIAL (tmp) = init;
2078 pushdecl (tmp);
2079
2080 return tmp;
2081 }
2082
2083
2084 /* Translate a constant EXPR_ARRAY array constructor for the scalarizer.
2085 This mostly initializes the scalarizer state info structure with the
2086 appropriate values to directly use the array created by the function
2087 gfc_build_constant_array_constructor. */
2088
2089 static void
2090 trans_constant_array_constructor (gfc_ss * ss, tree type)
2091 {
2092 gfc_array_info *info;
2093 tree tmp;
2094 int i;
2095
2096 tmp = gfc_build_constant_array_constructor (ss->info->expr, type);
2097
2098 info = &ss->info->data.array;
2099
2100 info->descriptor = tmp;
2101 info->data = gfc_build_addr_expr (NULL_TREE, tmp);
2102 info->offset = gfc_index_zero_node;
2103
2104 for (i = 0; i < ss->dimen; i++)
2105 {
2106 info->delta[i] = gfc_index_zero_node;
2107 info->start[i] = gfc_index_zero_node;
2108 info->end[i] = gfc_index_zero_node;
2109 info->stride[i] = gfc_index_one_node;
2110 }
2111 }
2112
2113
2114 static int
2115 get_rank (gfc_loopinfo *loop)
2116 {
2117 int rank;
2118
2119 rank = 0;
2120 for (; loop; loop = loop->parent)
2121 rank += loop->dimen;
2122
2123 return rank;
2124 }
2125
2126
2127 /* Helper routine of gfc_trans_array_constructor to determine if the
2128 bounds of the loop specified by LOOP are constant and simple enough
2129 to use with trans_constant_array_constructor. Returns the
2130 iteration count of the loop if suitable, and NULL_TREE otherwise. */
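/* For example, constant loop bounds 0..4 and 0..2 over two dimensions
   give (4 + 1) * (2 + 1) = 15 iterations; any non-constant bound, or a
   nonzero lower bound in a multi-dimensional loop, yields NULL_TREE.  */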
2131
2132 static tree
2133 constant_array_constructor_loop_size (gfc_loopinfo * l)
2134 {
2135 gfc_loopinfo *loop;
2136 tree size = gfc_index_one_node;
2137 tree tmp;
2138 int i, total_dim;
2139
2140 total_dim = get_rank (l);
2141
2142 for (loop = l; loop; loop = loop->parent)
2143 {
2144 for (i = 0; i < loop->dimen; i++)
2145 {
2146 /* If the bounds aren't constant, return NULL_TREE. */
2147 if (!INTEGER_CST_P (loop->from[i]) || !INTEGER_CST_P (loop->to[i]))
2148 return NULL_TREE;
2149 if (!integer_zerop (loop->from[i]))
2150 {
2151 /* Only allow nonzero "from" in one-dimensional arrays. */
2152 if (total_dim != 1)
2153 return NULL_TREE;
2154 tmp = fold_build2_loc (input_location, MINUS_EXPR,
2155 gfc_array_index_type,
2156 loop->to[i], loop->from[i]);
2157 }
2158 else
2159 tmp = loop->to[i];
2160 tmp = fold_build2_loc (input_location, PLUS_EXPR,
2161 gfc_array_index_type, tmp, gfc_index_one_node);
2162 size = fold_build2_loc (input_location, MULT_EXPR,
2163 gfc_array_index_type, size, tmp);
2164 }
2165 }
2166
2167 return size;
2168 }
2169
2170
2171 static tree *
2172 get_loop_upper_bound_for_array (gfc_ss *array, int array_dim)
2173 {
2174 gfc_ss *ss;
2175 int n;
2176
2177 gcc_assert (array->nested_ss == NULL);
2178
2179 for (ss = array; ss; ss = ss->parent)
2180 for (n = 0; n < ss->loop->dimen; n++)
2181 if (array_dim == get_array_ref_dim_for_loop_dim (ss, n))
2182 return &(ss->loop->to[n]);
2183
2184 gcc_unreachable ();
2185 }
2186
2187
2188 static gfc_loopinfo *
2189 outermost_loop (gfc_loopinfo * loop)
2190 {
2191 while (loop->parent != NULL)
2192 loop = loop->parent;
2193
2194 return loop;
2195 }
2196
2197
2198 /* Array constructors are handled by constructing a temporary, then using that
2199 within the scalarization loop. This is not optimal, but seems by far the
2200 simplest method. */
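/* For instance, for x = (/ a, 2 * b /) a rank-1 temporary is allocated,
   its two elements are stored by gfc_trans_array_constructor_value, and
   the surrounding scalarization loop then reads the temporary; the
   all-constant case below instead reuses a static array built by
   gfc_build_constant_array_constructor.  */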
2201
2202 static void
2203 trans_array_constructor (gfc_ss * ss, locus * where)
2204 {
2205 gfc_constructor_base c;
2206 tree offset;
2207 tree offsetvar;
2208 tree desc;
2209 tree type;
2210 tree tmp;
2211 tree *loop_ubound0;
2212 bool dynamic;
2213 bool old_first_len, old_typespec_chararray_ctor;
2214 tree old_first_len_val;
2215 gfc_loopinfo *loop, *outer_loop;
2216 gfc_ss_info *ss_info;
2217 gfc_expr *expr;
2218 gfc_ss *s;
2219
2220 /* Save the old values for nested checking. */
2221 old_first_len = first_len;
2222 old_first_len_val = first_len_val;
2223 old_typespec_chararray_ctor = typespec_chararray_ctor;
2224
2225 loop = ss->loop;
2226 outer_loop = outermost_loop (loop);
2227 ss_info = ss->info;
2228 expr = ss_info->expr;
2229
2230 /* Do bounds-checking here and in gfc_trans_array_ctor_element only if no
2231 typespec was given for the array constructor. */
2232 typespec_chararray_ctor = (expr->ts.u.cl
2233 && expr->ts.u.cl->length_from_typespec);
2234
2235 if ((gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
2236 && expr->ts.type == BT_CHARACTER && !typespec_chararray_ctor)
2237 {
2238 first_len_val = gfc_create_var (gfc_charlen_type_node, "len");
2239 first_len = true;
2240 }
2241
2242 gcc_assert (ss->dimen == ss->loop->dimen);
2243
2244 c = expr->value.constructor;
2245 if (expr->ts.type == BT_CHARACTER)
2246 {
2247 bool const_string;
2248 bool force_new_cl = false;
2249
2250 /* get_array_ctor_strlen walks the elements of the constructor, if a
2251 typespec was given, we already know the string length and want the one
2252 specified there. */
2253 if (typespec_chararray_ctor && expr->ts.u.cl->length
2254 && expr->ts.u.cl->length->expr_type != EXPR_CONSTANT)
2255 {
2256 gfc_se length_se;
2257
2258 const_string = false;
2259 gfc_init_se (&length_se, NULL);
2260 gfc_conv_expr_type (&length_se, expr->ts.u.cl->length,
2261 gfc_charlen_type_node);
2262 ss_info->string_length = length_se.expr;
2263 gfc_add_block_to_block (&outer_loop->pre, &length_se.pre);
2264 gfc_add_block_to_block (&outer_loop->post, &length_se.post);
2265 }
2266 else
2267 {
2268 const_string = get_array_ctor_strlen (&outer_loop->pre, c,
2269 &ss_info->string_length);
2270 force_new_cl = true;
2271 }
2272
2273 /* Complex character array constructors should have been taken care of
2274 and not end up here. */
2275 gcc_assert (ss_info->string_length);
2276
2277 store_backend_decl (&expr->ts.u.cl, ss_info->string_length, force_new_cl);
2278
2279 type = gfc_get_character_type_len (expr->ts.kind, ss_info->string_length);
2280 if (const_string)
2281 type = build_pointer_type (type);
2282 }
2283 else
2284 type = gfc_typenode_for_spec (&expr->ts);
2285
2286 /* See if the constructor determines the loop bounds. */
2287 dynamic = false;
2288
2289 loop_ubound0 = get_loop_upper_bound_for_array (ss, 0);
2290
2291 if (expr->shape && get_rank (loop) > 1 && *loop_ubound0 == NULL_TREE)
2292 {
2293 /* We have a multidimensional parameter. */
2294 for (s = ss; s; s = s->parent)
2295 {
2296 int n;
2297 for (n = 0; n < s->loop->dimen; n++)
2298 {
2299 s->loop->from[n] = gfc_index_zero_node;
2300 s->loop->to[n] = gfc_conv_mpz_to_tree (expr->shape[s->dim[n]],
2301 gfc_index_integer_kind);
2302 s->loop->to[n] = fold_build2_loc (input_location, MINUS_EXPR,
2303 gfc_array_index_type,
2304 s->loop->to[n],
2305 gfc_index_one_node);
2306 }
2307 }
2308 }
2309
2310 if (*loop_ubound0 == NULL_TREE)
2311 {
2312 mpz_t size;
2313
2314 /* We should have a 1-dimensional, zero-based loop. */
2315 gcc_assert (loop->parent == NULL && loop->nested == NULL);
2316 gcc_assert (loop->dimen == 1);
2317 gcc_assert (integer_zerop (loop->from[0]));
2318
2319 /* Split the constructor size into a static part and a dynamic part.
2320 Allocate the static size up-front and record whether the dynamic
2321 size might be nonzero. */
2322 mpz_init (size);
2323 dynamic = gfc_get_array_constructor_size (&size, c);
2324 mpz_sub_ui (size, size, 1);
2325 loop->to[0] = gfc_conv_mpz_to_tree (size, gfc_index_integer_kind);
2326 mpz_clear (size);
2327 }
2328
2329 /* Special case constant array constructors. */
2330 if (!dynamic)
2331 {
2332 unsigned HOST_WIDE_INT nelem = gfc_constant_array_constructor_p (c);
2333 if (nelem > 0)
2334 {
2335 tree size = constant_array_constructor_loop_size (loop);
2336 if (size && compare_tree_int (size, nelem) == 0)
2337 {
2338 trans_constant_array_constructor (ss, type);
2339 goto finish;
2340 }
2341 }
2342 }
2343
2344 gfc_trans_create_temp_array (&outer_loop->pre, &outer_loop->post, ss, type,
2345 NULL_TREE, dynamic, true, false, where);
2346
2347 desc = ss_info->data.array.descriptor;
2348 offset = gfc_index_zero_node;
2349 offsetvar = gfc_create_var_np (gfc_array_index_type, "offset");
2350 TREE_NO_WARNING (offsetvar) = 1;
2351 TREE_USED (offsetvar) = 0;
2352 gfc_trans_array_constructor_value (&outer_loop->pre, type, desc, c,
2353 &offset, &offsetvar, dynamic);
2354
2355 /* If the array grows dynamically, the upper bound of the loop variable
2356 is determined by the array's final upper bound. */
2357 if (dynamic)
2358 {
2359 tmp = fold_build2_loc (input_location, MINUS_EXPR,
2360 gfc_array_index_type,
2361 offsetvar, gfc_index_one_node);
2362 tmp = gfc_evaluate_now (tmp, &outer_loop->pre);
2363 gfc_conv_descriptor_ubound_set (&loop->pre, desc, gfc_rank_cst[0], tmp);
2364 if (*loop_ubound0 && TREE_CODE (*loop_ubound0) == VAR_DECL)
2365 gfc_add_modify (&outer_loop->pre, *loop_ubound0, tmp);
2366 else
2367 *loop_ubound0 = tmp;
2368 }
2369
2370 if (TREE_USED (offsetvar))
2371 pushdecl (offsetvar);
2372 else
2373 gcc_assert (INTEGER_CST_P (offset));
2374
2375 #if 0
2376 /* Disable bound checking for now because it's probably broken. */
2377 if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
2378 {
2379 gcc_unreachable ();
2380 }
2381 #endif
2382
2383 finish:
2384 /* Restore old values of globals. */
2385 first_len = old_first_len;
2386 first_len_val = old_first_len_val;
2387 typespec_chararray_ctor = old_typespec_chararray_ctor;
2388 }
2389
2390
2391 /* INFO describes a GFC_SS_SECTION in loop LOOP, and this function is
2392 called after evaluating all of INFO's vector dimensions. Go through
2393 each such vector dimension and see if we can now fill in any missing
2394 loop bounds. */
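/* For example, for a section A(V) subscripted by a vector V whose
   descriptor has bounds LB:UB, a loop dimension that still has no upper
   bound gets UB - LB (the loop itself always starts at zero).  */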
2395
2396 static void
2397 set_vector_loop_bounds (gfc_ss * ss)
2398 {
2399 gfc_loopinfo *loop, *outer_loop;
2400 gfc_array_info *info;
2401 gfc_se se;
2402 tree tmp;
2403 tree desc;
2404 tree zero;
2405 int n;
2406 int dim;
2407
2408 outer_loop = outermost_loop (ss->loop);
2409
2410 info = &ss->info->data.array;
2411
2412 for (; ss; ss = ss->parent)
2413 {
2414 loop = ss->loop;
2415
2416 for (n = 0; n < loop->dimen; n++)
2417 {
2418 dim = ss->dim[n];
2419 if (info->ref->u.ar.dimen_type[dim] != DIMEN_VECTOR
2420 || loop->to[n] != NULL)
2421 continue;
2422
2423 /* Loop variable N indexes vector dimension DIM, and we don't
2424 yet know the upper bound of loop variable N. Set it to the
2425 difference between the vector's upper and lower bounds. */
2426 gcc_assert (loop->from[n] == gfc_index_zero_node);
2427 gcc_assert (info->subscript[dim]
2428 && info->subscript[dim]->info->type == GFC_SS_VECTOR);
2429
2430 gfc_init_se (&se, NULL);
2431 desc = info->subscript[dim]->info->data.array.descriptor;
2432 zero = gfc_rank_cst[0];
2433 tmp = fold_build2_loc (input_location, MINUS_EXPR,
2434 gfc_array_index_type,
2435 gfc_conv_descriptor_ubound_get (desc, zero),
2436 gfc_conv_descriptor_lbound_get (desc, zero));
2437 tmp = gfc_evaluate_now (tmp, &outer_loop->pre);
2438 loop->to[n] = tmp;
2439 }
2440 }
2441 }
2442
2443
2444 /* Tells whether a scalar argument to an elemental procedure is saved out
2445 of a scalarization loop as a value or as a reference. */
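/* For example, a derived-type or CLASS variable passed to an elemental
   procedure is saved as a reference (no copy), while a scalar numeric
   expression such as X + 1 is evaluated into a temporary before the
   loop and saved as a value.  */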
2446
2447 bool
2448 gfc_scalar_elemental_arg_saved_as_reference (gfc_ss_info * ss_info)
2449 {
2450 if (ss_info->type != GFC_SS_REFERENCE)
2451 return false;
2452
2453 /* If the actual argument can be absent (in other words, it can
2454 be a NULL reference), don't try to evaluate it; pass instead
2455 the reference directly. */
2456 if (ss_info->can_be_null_ref)
2457 return true;
2458
2459 /* If the expression is of polymorphic type, its actual size is not known,
2460 so we avoid copying it anywhere. */
2461 if (ss_info->data.scalar.dummy_arg
2462 && ss_info->data.scalar.dummy_arg->ts.type == BT_CLASS
2463 && ss_info->expr->ts.type == BT_CLASS)
2464 return true;
2465
2466 /* If the expression is a data reference of aggregate type,
2467 avoid a copy by saving a reference to the content. */
2468 if (ss_info->expr->expr_type == EXPR_VARIABLE
2469 && (ss_info->expr->ts.type == BT_DERIVED
2470 || ss_info->expr->ts.type == BT_CLASS))
2471 return true;
2472
2473 /* Otherwise the expression is evaluated to a temporary variable before the
2474 scalarization loop. */
2475 return false;
2476 }
2477
2478
2479 /* Add the pre and post chains for all the scalar expressions in a SS chain
2480 to loop. This is called after the loop parameters have been calculated,
2481 but before the actual scalarizing loops. */
2482
2483 static void
2484 gfc_add_loop_ss_code (gfc_loopinfo * loop, gfc_ss * ss, bool subscript,
2485 locus * where)
2486 {
2487 gfc_loopinfo *nested_loop, *outer_loop;
2488 gfc_se se;
2489 gfc_ss_info *ss_info;
2490 gfc_array_info *info;
2491 gfc_expr *expr;
2492 int n;
2493
2494 /* Don't evaluate the arguments for realloc_lhs_loop_for_fcn_call; otherwise,
2495 arguments could get evaluated multiple times. */
2496 if (ss->is_alloc_lhs)
2497 return;
2498
2499 outer_loop = outermost_loop (loop);
2500
2501 /* TODO: This can generate bad code if there are ordering dependencies,
2502 e.g., a callee allocated function and an unknown size constructor. */
2503 gcc_assert (ss != NULL);
2504
2505 for (; ss != gfc_ss_terminator; ss = ss->loop_chain)
2506 {
2507 gcc_assert (ss);
2508
2509 /* Cross loop arrays are handled from within the most nested loop. */
2510 if (ss->nested_ss != NULL)
2511 continue;
2512
2513 ss_info = ss->info;
2514 expr = ss_info->expr;
2515 info = &ss_info->data.array;
2516
2517 switch (ss_info->type)
2518 {
2519 case GFC_SS_SCALAR:
2520 /* Scalar expression. Evaluate this now. This includes elemental
2521 dimension indices, but not array section bounds. */
2522 gfc_init_se (&se, NULL);
2523 gfc_conv_expr (&se, expr);
2524 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2525
2526 if (expr->ts.type != BT_CHARACTER
2527 && !gfc_is_alloc_class_scalar_function (expr))
2528 {
2529 /* Move the evaluation of scalar expressions outside the
2530 scalarization loop, except for WHERE assignments. */
2531 if (subscript)
2532 se.expr = convert (gfc_array_index_type, se.expr);
2533 if (!ss_info->where)
2534 se.expr = gfc_evaluate_now (se.expr, &outer_loop->pre);
2535 gfc_add_block_to_block (&outer_loop->pre, &se.post);
2536 }
2537 else
2538 gfc_add_block_to_block (&outer_loop->post, &se.post);
2539
2540 ss_info->data.scalar.value = se.expr;
2541 ss_info->string_length = se.string_length;
2542 break;
2543
2544 case GFC_SS_REFERENCE:
2545 /* Scalar argument to elemental procedure. */
2546 gfc_init_se (&se, NULL);
2547 if (gfc_scalar_elemental_arg_saved_as_reference (ss_info))
2548 gfc_conv_expr_reference (&se, expr);
2549 else
2550 {
2551 /* Evaluate the argument outside the loop and pass
2552 a reference to the value. */
2553 gfc_conv_expr (&se, expr);
2554 }
2555
2556 /* Ensure that a pointer to the string is stored. */
2557 if (expr->ts.type == BT_CHARACTER)
2558 gfc_conv_string_parameter (&se);
2559
2560 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2561 gfc_add_block_to_block (&outer_loop->post, &se.post);
2562 if (gfc_is_class_scalar_expr (expr))
2563 /* This is necessary because the dynamic type will always be
2564 larger than the declared type. In consequence, assigning
2565 the value to a temporary could segfault.
2566 OOP-TODO: see if this is generally correct or whether the value
2567 has to be written to an allocated temporary, whose address
2568 is passed via ss_info. */
2569 ss_info->data.scalar.value = se.expr;
2570 else
2571 ss_info->data.scalar.value = gfc_evaluate_now (se.expr,
2572 &outer_loop->pre);
2573
2574 ss_info->string_length = se.string_length;
2575 break;
2576
2577 case GFC_SS_SECTION:
2578 /* Add the expressions for scalar and vector subscripts. */
2579 for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
2580 if (info->subscript[n])
2581 gfc_add_loop_ss_code (loop, info->subscript[n], true, where);
2582
2583 set_vector_loop_bounds (ss);
2584 break;
2585
2586 case GFC_SS_VECTOR:
2587 /* Get the vector's descriptor and store it in SS. */
2588 gfc_init_se (&se, NULL);
2589 gfc_conv_expr_descriptor (&se, expr);
2590 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2591 gfc_add_block_to_block (&outer_loop->post, &se.post);
2592 info->descriptor = se.expr;
2593 break;
2594
2595 case GFC_SS_INTRINSIC:
2596 gfc_add_intrinsic_ss_code (loop, ss);
2597 break;
2598
2599 case GFC_SS_FUNCTION:
2600 /* Array function return value. We call the function and save its
2601 result in a temporary for use inside the loop. */
2602 gfc_init_se (&se, NULL);
2603 se.loop = loop;
2604 se.ss = ss;
2605 gfc_conv_expr (&se, expr);
2606 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2607 gfc_add_block_to_block (&outer_loop->post, &se.post);
2608 ss_info->string_length = se.string_length;
2609 break;
2610
2611 case GFC_SS_CONSTRUCTOR:
2612 if (expr->ts.type == BT_CHARACTER
2613 && ss_info->string_length == NULL
2614 && expr->ts.u.cl
2615 && expr->ts.u.cl->length
2616 && expr->ts.u.cl->length->expr_type == EXPR_CONSTANT)
2617 {
2618 gfc_init_se (&se, NULL);
2619 gfc_conv_expr_type (&se, expr->ts.u.cl->length,
2620 gfc_charlen_type_node);
2621 ss_info->string_length = se.expr;
2622 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2623 gfc_add_block_to_block (&outer_loop->post, &se.post);
2624 }
2625 trans_array_constructor (ss, where);
2626 break;
2627
2628 case GFC_SS_TEMP:
2629 case GFC_SS_COMPONENT:
2630 /* Do nothing. These are handled elsewhere. */
2631 break;
2632
2633 default:
2634 gcc_unreachable ();
2635 }
2636 }
2637
2638 if (!subscript)
2639 for (nested_loop = loop->nested; nested_loop;
2640 nested_loop = nested_loop->next)
2641 gfc_add_loop_ss_code (nested_loop, nested_loop->ss, subscript, where);
2642 }
2643
2644
2645 /* Translate expressions for the descriptor and data pointer of a SS. */
2646 /*GCC ARRAYS*/
2647
2648 static void
2649 gfc_conv_ss_descriptor (stmtblock_t * block, gfc_ss * ss, int base)
2650 {
2651 gfc_se se;
2652 gfc_ss_info *ss_info;
2653 gfc_array_info *info;
2654 tree tmp;
2655
2656 ss_info = ss->info;
2657 info = &ss_info->data.array;
2658
2659 /* Get the descriptor for the array to be scalarized. */
2660 gcc_assert (ss_info->expr->expr_type == EXPR_VARIABLE);
2661 gfc_init_se (&se, NULL);
2662 se.descriptor_only = 1;
2663 gfc_conv_expr_lhs (&se, ss_info->expr);
2664 gfc_add_block_to_block (block, &se.pre);
2665 info->descriptor = se.expr;
2666 ss_info->string_length = se.string_length;
2667
2668 if (base)
2669 {
2670 /* Also the data pointer. */
2671 tmp = gfc_conv_array_data (se.expr);
2672 /* If this is a variable or address of a variable we use it directly.
2673 Otherwise we must evaluate it now to avoid breaking dependency
2674 analysis by pulling the expressions for elemental array indices
2675 inside the loop. */
2676 if (!(DECL_P (tmp)
2677 || (TREE_CODE (tmp) == ADDR_EXPR
2678 && DECL_P (TREE_OPERAND (tmp, 0)))))
2679 tmp = gfc_evaluate_now (tmp, block);
2680 info->data = tmp;
2681
2682 tmp = gfc_conv_array_offset (se.expr);
2683 info->offset = gfc_evaluate_now (tmp, block);
2684
2685 /* Make absolutely sure that the saved_offset is indeed saved
2686 so that the variable is still accessible after the loops
2687 are translated. */
2688 info->saved_offset = info->offset;
2689 }
2690 }
2691
2692
2693 /* Initialize a gfc_loopinfo structure. */
2694
2695 void
2696 gfc_init_loopinfo (gfc_loopinfo * loop)
2697 {
2698 int n;
2699
2700 memset (loop, 0, sizeof (gfc_loopinfo));
2701 gfc_init_block (&loop->pre);
2702 gfc_init_block (&loop->post);
2703
2704 /* Initially scalarize in order and default to no loop reversal. */
2705 for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
2706 {
2707 loop->order[n] = n;
2708 loop->reverse[n] = GFC_INHIBIT_REVERSE;
2709 }
2710
2711 loop->ss = gfc_ss_terminator;
2712 }
2713
2714
2715 /* Copies the loop variable info to a gfc_se structure. Does not copy the SS
2716 chain. */
2717
2718 void
2719 gfc_copy_loopinfo_to_se (gfc_se * se, gfc_loopinfo * loop)
2720 {
2721 se->loop = loop;
2722 }
2723
2724
2725 /* Return an expression for the data pointer of an array. */
2726
2727 tree
2728 gfc_conv_array_data (tree descriptor)
2729 {
2730 tree type;
2731
2732 type = TREE_TYPE (descriptor);
2733 if (GFC_ARRAY_TYPE_P (type))
2734 {
2735 if (TREE_CODE (type) == POINTER_TYPE)
2736 return descriptor;
2737 else
2738 {
2739 /* Descriptorless arrays. */
2740 return gfc_build_addr_expr (NULL_TREE, descriptor);
2741 }
2742 }
2743 else
2744 return gfc_conv_descriptor_data_get (descriptor);
2745 }
2746
2747
2748 /* Return an expression for the base offset of an array. */
2749
2750 tree
2751 gfc_conv_array_offset (tree descriptor)
2752 {
2753 tree type;
2754
2755 type = TREE_TYPE (descriptor);
2756 if (GFC_ARRAY_TYPE_P (type))
2757 return GFC_TYPE_ARRAY_OFFSET (type);
2758 else
2759 return gfc_conv_descriptor_offset_get (descriptor);
2760 }
2761
2762
2763 /* Get an expression for the array stride. */
2764
2765 tree
2766 gfc_conv_array_stride (tree descriptor, int dim)
2767 {
2768 tree tmp;
2769 tree type;
2770
2771 type = TREE_TYPE (descriptor);
2772
2773 /* For descriptorless arrays use the stride stored in the array type. */
2774 tmp = GFC_TYPE_ARRAY_STRIDE (type, dim);
2775 if (tmp != NULL_TREE)
2776 return tmp;
2777
2778 tmp = gfc_conv_descriptor_stride_get (descriptor, gfc_rank_cst[dim]);
2779 return tmp;
2780 }
2781
2782
2783 /* Like gfc_conv_array_stride, but for the lower bound. */
2784
2785 tree
2786 gfc_conv_array_lbound (tree descriptor, int dim)
2787 {
2788 tree tmp;
2789 tree type;
2790
2791 type = TREE_TYPE (descriptor);
2792
2793 tmp = GFC_TYPE_ARRAY_LBOUND (type, dim);
2794 if (tmp != NULL_TREE)
2795 return tmp;
2796
2797 tmp = gfc_conv_descriptor_lbound_get (descriptor, gfc_rank_cst[dim]);
2798 return tmp;
2799 }
2800
2801
2802 /* Like gfc_conv_array_stride, but for the upper bound. */
2803
2804 tree
2805 gfc_conv_array_ubound (tree descriptor, int dim)
2806 {
2807 tree tmp;
2808 tree type;
2809
2810 type = TREE_TYPE (descriptor);
2811
2812 tmp = GFC_TYPE_ARRAY_UBOUND (type, dim);
2813 if (tmp != NULL_TREE)
2814 return tmp;
2815
2816 /* This should only ever happen when passing an assumed shape array
2817 as an actual parameter. The value will never be used. */
2818 if (GFC_ARRAY_TYPE_P (TREE_TYPE (descriptor)))
2819 return gfc_index_zero_node;
2820
2821 tmp = gfc_conv_descriptor_ubound_get (descriptor, gfc_rank_cst[dim]);
2822 return tmp;
2823 }
2824
2825
2826 /* Generate code to perform an array index bound check. */
2827
2828 static tree
2829 trans_array_bound_check (gfc_se * se, gfc_ss *ss, tree index, int n,
2830 locus * where, bool check_upper)
2831 {
2832 tree fault;
2833 tree tmp_lo, tmp_up;
2834 tree descriptor;
2835 char *msg;
2836 const char * name = NULL;
2837
2838 if (!(gfc_option.rtcheck & GFC_RTCHECK_BOUNDS))
2839 return index;
2840
2841 descriptor = ss->info->data.array.descriptor;
2842
2843 index = gfc_evaluate_now (index, &se->pre);
2844
2845 /* We find a name for the error message. */
2846 name = ss->info->expr->symtree->n.sym->name;
2847 gcc_assert (name != NULL);
2848
2849 if (TREE_CODE (descriptor) == VAR_DECL)
2850 name = IDENTIFIER_POINTER (DECL_NAME (descriptor));
2851
2852 /* If upper bound is present, include both bounds in the error message. */
2853 if (check_upper)
2854 {
2855 tmp_lo = gfc_conv_array_lbound (descriptor, n);
2856 tmp_up = gfc_conv_array_ubound (descriptor, n);
2857
2858 if (name)
2859 msg = xasprintf ("Index '%%ld' of dimension %d of array '%s' "
2860 "outside of expected range (%%ld:%%ld)", n+1, name);
2861 else
2862 msg = xasprintf ("Index '%%ld' of dimension %d "
2863 "outside of expected range (%%ld:%%ld)", n+1);
2864
2865 fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
2866 index, tmp_lo);
2867 gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
2868 fold_convert (long_integer_type_node, index),
2869 fold_convert (long_integer_type_node, tmp_lo),
2870 fold_convert (long_integer_type_node, tmp_up));
2871 fault = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
2872 index, tmp_up);
2873 gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
2874 fold_convert (long_integer_type_node, index),
2875 fold_convert (long_integer_type_node, tmp_lo),
2876 fold_convert (long_integer_type_node, tmp_up));
2877 free (msg);
2878 }
2879 else
2880 {
2881 tmp_lo = gfc_conv_array_lbound (descriptor, n);
2882
2883 if (name)
2884 msg = xasprintf ("Index '%%ld' of dimension %d of array '%s' "
2885 "below lower bound of %%ld", n+1, name);
2886 else
2887 msg = xasprintf ("Index '%%ld' of dimension %d "
2888 "below lower bound of %%ld", n+1);
2889
2890 fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
2891 index, tmp_lo);
2892 gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
2893 fold_convert (long_integer_type_node, index),
2894 fold_convert (long_integer_type_node, tmp_lo));
2895 free (msg);
2896 }
2897
2898 return index;
2899 }
2900
2901
2902 /* Return the offset for an index. Performs bound checking for elemental
2903 dimensions. Single element references are processed separately.
2904 DIM is the array dimension, I is the loop dimension. */
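/* As a worked example, for a scalarized (DIMEN_RANGE) dimension whose
   section has stride 2 and delta 3, loop variable I is mapped to the
   array index I * 2 + 3 below, and the result is then multiplied by the
   descriptor stride of that dimension.  */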
2905
2906 static tree
2907 conv_array_index_offset (gfc_se * se, gfc_ss * ss, int dim, int i,
2908 gfc_array_ref * ar, tree stride)
2909 {
2910 gfc_array_info *info;
2911 tree index;
2912 tree desc;
2913 tree data;
2914
2915 info = &ss->info->data.array;
2916
2917 /* Get the index into the array for this dimension. */
2918 if (ar)
2919 {
2920 gcc_assert (ar->type != AR_ELEMENT);
2921 switch (ar->dimen_type[dim])
2922 {
2923 case DIMEN_THIS_IMAGE:
2924 gcc_unreachable ();
2925 break;
2926 case DIMEN_ELEMENT:
2927 /* Elemental dimension. */
2928 gcc_assert (info->subscript[dim]
2929 && info->subscript[dim]->info->type == GFC_SS_SCALAR);
2930 /* We've already translated this value outside the loop. */
2931 index = info->subscript[dim]->info->data.scalar.value;
2932
2933 index = trans_array_bound_check (se, ss, index, dim, &ar->where,
2934 ar->as->type != AS_ASSUMED_SIZE
2935 || dim < ar->dimen - 1);
2936 break;
2937
2938 case DIMEN_VECTOR:
2939 gcc_assert (info && se->loop);
2940 gcc_assert (info->subscript[dim]
2941 && info->subscript[dim]->info->type == GFC_SS_VECTOR);
2942 desc = info->subscript[dim]->info->data.array.descriptor;
2943
2944 /* Get a zero-based index into the vector. */
2945 index = fold_build2_loc (input_location, MINUS_EXPR,
2946 gfc_array_index_type,
2947 se->loop->loopvar[i], se->loop->from[i]);
2948
2949 /* Multiply the index by the stride. */
2950 index = fold_build2_loc (input_location, MULT_EXPR,
2951 gfc_array_index_type,
2952 index, gfc_conv_array_stride (desc, 0));
2953
2954 /* Read the vector to get an index into info->descriptor. */
2955 data = build_fold_indirect_ref_loc (input_location,
2956 gfc_conv_array_data (desc));
2957 index = gfc_build_array_ref (data, index, NULL);
2958 index = gfc_evaluate_now (index, &se->pre);
2959 index = fold_convert (gfc_array_index_type, index);
2960
2961 /* Do any bounds checking on the final info->descriptor index. */
2962 index = trans_array_bound_check (se, ss, index, dim, &ar->where,
2963 ar->as->type != AS_ASSUMED_SIZE
2964 || dim < ar->dimen - 1);
2965 break;
2966
2967 case DIMEN_RANGE:
2968 /* Scalarized dimension. */
2969 gcc_assert (info && se->loop);
2970
2971 /* Multiply the loop variable by the stride and delta. */
2972 index = se->loop->loopvar[i];
2973 if (!integer_onep (info->stride[dim]))
2974 index = fold_build2_loc (input_location, MULT_EXPR,
2975 gfc_array_index_type, index,
2976 info->stride[dim]);
2977 if (!integer_zerop (info->delta[dim]))
2978 index = fold_build2_loc (input_location, PLUS_EXPR,
2979 gfc_array_index_type, index,
2980 info->delta[dim]);
2981 break;
2982
2983 default:
2984 gcc_unreachable ();
2985 }
2986 }
2987 else
2988 {
2989 /* Temporary array or derived type component. */
2990 gcc_assert (se->loop);
2991 index = se->loop->loopvar[se->loop->order[i]];
2992
2993 /* Pointer functions can have stride[0] different from unity.
2994 Use the stride returned by the function call and stored in
2995 the descriptor for the temporary. */
2996 if (se->ss && se->ss->info->type == GFC_SS_FUNCTION
2997 && se->ss->info->expr
2998 && se->ss->info->expr->symtree
2999 && se->ss->info->expr->symtree->n.sym->result
3000 && se->ss->info->expr->symtree->n.sym->result->attr.pointer)
3001 stride = gfc_conv_descriptor_stride_get (info->descriptor,
3002 gfc_rank_cst[dim]);
3003
3004 if (info->delta[dim] && !integer_zerop (info->delta[dim]))
3005 index = fold_build2_loc (input_location, PLUS_EXPR,
3006 gfc_array_index_type, index, info->delta[dim]);
3007 }
3008
3009 /* Multiply by the stride. */
3010 if (!integer_onep (stride))
3011 index = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
3012 index, stride);
3013
3014 return index;
3015 }
3016
3017
3018 /* Build a scalarized array reference using the vptr 'size'. */
3019
3020 static bool
3021 build_class_array_ref (gfc_se *se, tree base, tree index)
3022 {
3023 tree type;
3024 tree size;
3025 tree offset;
3026 tree decl;
3027 tree tmp;
3028 gfc_expr *expr = se->ss->info->expr;
3029 gfc_ref *ref;
3030 gfc_ref *class_ref;
3031 gfc_typespec *ts;
3032
3033 if (expr == NULL
3034 || (expr->ts.type != BT_CLASS
3035 && !gfc_is_alloc_class_array_function (expr)))
3036 return false;
3037
3038 if (expr->symtree && expr->symtree->n.sym->ts.type == BT_CLASS)
3039 ts = &expr->symtree->n.sym->ts;
3040 else
3041 ts = NULL;
3042 class_ref = NULL;
3043
3044 for (ref = expr->ref; ref; ref = ref->next)
3045 {
3046 if (ref->type == REF_COMPONENT
3047 && ref->u.c.component->ts.type == BT_CLASS
3048 && ref->next && ref->next->type == REF_COMPONENT
3049 && strcmp (ref->next->u.c.component->name, "_data") == 0
3050 && ref->next->next
3051 && ref->next->next->type == REF_ARRAY
3052 && ref->next->next->u.ar.type != AR_ELEMENT)
3053 {
3054 ts = &ref->u.c.component->ts;
3055 class_ref = ref;
3056 break;
3057 }
3058 }
3059
3060 if (ts == NULL)
3061 return false;
3062
3063 if (class_ref == NULL && expr->symtree->n.sym->attr.function
3064 && expr->symtree->n.sym == expr->symtree->n.sym->result)
3065 {
3066 gcc_assert (expr->symtree->n.sym->backend_decl == current_function_decl);
3067 decl = gfc_get_fake_result_decl (expr->symtree->n.sym, 0);
3068 }
3069 else if (gfc_is_alloc_class_array_function (expr))
3070 {
3071 size = NULL_TREE;
3072 decl = NULL_TREE;
3073 for (tmp = base; tmp; tmp = TREE_OPERAND (tmp, 0))
3074 {
3075 tree type;
3076 type = TREE_TYPE (tmp);
3077 while (type)
3078 {
3079 if (GFC_CLASS_TYPE_P (type))
3080 decl = tmp;
3081 if (type != TYPE_CANONICAL (type))
3082 type = TYPE_CANONICAL (type);
3083 else
3084 type = NULL_TREE;
3085 }
3086 if (TREE_CODE (tmp) == VAR_DECL)
3087 break;
3088 }
3089
3090 if (decl == NULL_TREE)
3091 return false;
3092 }
3093 else if (class_ref == NULL)
3094 {
3095 decl = expr->symtree->n.sym->backend_decl;
3096 /* For class arrays the tree containing the class is stored in
3097 GFC_DECL_SAVED_DESCRIPTOR of the sym's backend_decl.
3098 For all others it's sym's backend_decl directly. */
3099 if (DECL_LANG_SPECIFIC (decl) && GFC_DECL_SAVED_DESCRIPTOR (decl))
3100 decl = GFC_DECL_SAVED_DESCRIPTOR (decl);
3101 }
3102 else
3103 {
3104 /* Remove everything after the last class reference, convert the
3105 expression and then recover its tail end once more. */
3106 gfc_se tmpse;
3107 ref = class_ref->next;
3108 class_ref->next = NULL;
3109 gfc_init_se (&tmpse, NULL);
3110 gfc_conv_expr (&tmpse, expr);
3111 decl = tmpse.expr;
3112 class_ref->next = ref;
3113 }
3114
3115 if (POINTER_TYPE_P (TREE_TYPE (decl)))
3116 decl = build_fold_indirect_ref_loc (input_location, decl);
3117
3118 if (!GFC_CLASS_TYPE_P (TREE_TYPE (decl)))
3119 return false;
3120
3121 size = gfc_class_vtab_size_get (decl);
3122
3123 /* Build the address of the element. */
3124 type = TREE_TYPE (TREE_TYPE (base));
3125 size = fold_convert (TREE_TYPE (index), size);
3126 offset = fold_build2_loc (input_location, MULT_EXPR,
3127 gfc_array_index_type,
3128 index, size);
3129 tmp = gfc_build_addr_expr (pvoid_type_node, base);
3130 tmp = fold_build_pointer_plus_loc (input_location, tmp, offset);
3131 tmp = fold_convert (build_pointer_type (type), tmp);
3132
3133 /* Return the element in the se expression. */
3134 se->expr = build_fold_indirect_ref_loc (input_location, tmp);
3135 return true;
3136 }
3137
3138
3139 /* Build a scalarized reference to an array. */
3140
3141 static void
3142 gfc_conv_scalarized_array_ref (gfc_se * se, gfc_array_ref * ar)
3143 {
3144 gfc_array_info *info;
3145 tree decl = NULL_TREE;
3146 tree index;
3147 tree tmp;
3148 gfc_ss *ss;
3149 gfc_expr *expr;
3150 int n;
3151
3152 ss = se->ss;
3153 expr = ss->info->expr;
3154 info = &ss->info->data.array;
3155 if (ar)
3156 n = se->loop->order[0];
3157 else
3158 n = 0;
3159
3160 index = conv_array_index_offset (se, ss, ss->dim[n], n, ar, info->stride0);
3161 /* Add the offset for this dimension to the stored offset for all other
3162 dimensions. */
3163 if (info->offset && !integer_zerop (info->offset))
3164 index = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
3165 index, info->offset);
3166
3167 if (expr && (is_subref_array (expr)
3168 || (expr->ts.deferred && expr->expr_type == EXPR_VARIABLE)))
3169 decl = expr->symtree->n.sym->backend_decl;
3170
3171 tmp = build_fold_indirect_ref_loc (input_location, info->data);
3172
3173 /* Use the vptr 'size' field to access the element of a class
3174 array. */
3175 if (build_class_array_ref (se, tmp, index))
3176 return;
3177
3178 se->expr = gfc_build_array_ref (tmp, index, decl);
3179 }
3180
3181
3182 /* Translate access of temporary array. */
3183
3184 void
3185 gfc_conv_tmp_array_ref (gfc_se * se)
3186 {
3187 se->string_length = se->ss->info->string_length;
3188 gfc_conv_scalarized_array_ref (se, NULL);
3189 gfc_advance_se_ss_chain (se);
3190 }
3191
3192 /* Add T to the offset pair *OFFSET, *CST_OFFSET. */
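/* For example, successively adding the terms 4, i * 10 and 7 leaves
   *CST_OFFSET as the folded constant 11 and *OFFSET as the single
   non-constant summand i * 10.  */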
3193
3194 static void
3195 add_to_offset (tree *cst_offset, tree *offset, tree t)
3196 {
3197 if (TREE_CODE (t) == INTEGER_CST)
3198 *cst_offset = int_const_binop (PLUS_EXPR, *cst_offset, t);
3199 else
3200 {
3201 if (!integer_zerop (*offset))
3202 *offset = fold_build2_loc (input_location, PLUS_EXPR,
3203 gfc_array_index_type, *offset, t);
3204 else
3205 *offset = t;
3206 }
3207 }
3208
3209
3210 static tree
3211 build_array_ref (tree desc, tree offset, tree decl, tree vptr)
3212 {
3213 tree tmp;
3214 tree type;
3215 tree cdecl;
3216 bool classarray = false;
3217
3218 /* For class arrays the class declaration is stored in the saved
3219 descriptor. */
3220 if (INDIRECT_REF_P (desc)
3221 && DECL_LANG_SPECIFIC (TREE_OPERAND (desc, 0))
3222 && GFC_DECL_SAVED_DESCRIPTOR (TREE_OPERAND (desc, 0)))
3223 cdecl = gfc_class_data_get (GFC_DECL_SAVED_DESCRIPTOR (
3224 TREE_OPERAND (desc, 0)));
3225 else
3226 cdecl = desc;
3227
3228 /* Class container types are not always marked GFC_CLASS_TYPE_P,
3229 but the canonical type is. */
3230 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (cdecl))
3231 && TREE_CODE (cdecl) == COMPONENT_REF)
3232 {
3233 type = TREE_TYPE (TREE_OPERAND (cdecl, 0));
3234 if (TYPE_CANONICAL (type)
3235 && GFC_CLASS_TYPE_P (TYPE_CANONICAL (type)))
3236 {
3237 type = TREE_TYPE (desc);
3238 classarray = true;
3239 }
3240 }
3241 else
3242 type = NULL;
3243
3244 /* Class array references need special treatment because the assigned
3245 type size needs to be used to point to the element. */
3246 if (classarray)
3247 {
3248 type = gfc_get_element_type (type);
3249 tmp = TREE_OPERAND (cdecl, 0);
3250 tmp = gfc_get_class_array_ref (offset, tmp, NULL_TREE);
3251 tmp = fold_convert (build_pointer_type (type), tmp);
3252 tmp = build_fold_indirect_ref_loc (input_location, tmp);
3253 return tmp;
3254 }
3255
3256 tmp = gfc_conv_array_data (desc);
3257 tmp = build_fold_indirect_ref_loc (input_location, tmp);
3258 tmp = gfc_build_array_ref (tmp, offset, decl, vptr);
3259 return tmp;
3260 }
3261
3262
3263 /* Build an array reference. se->expr already holds the array descriptor.
3264 This should be either a variable, indirect variable reference or component
3265 reference. For arrays which do not have a descriptor, se->expr will be
3266 the data pointer.
3267 a(i, j, k) = base[offset + i * stride[0] + j * stride[1] + k * stride[2]]*/
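/* For instance, for a 10x10 array with lower bounds 1 the strides are
   1 and 10 and the base offset is -11, so a(3, 4) resolves to
   base[-11 + 3 * 1 + 4 * 10] = base[32], the element at zero-based
   column-major position 32.  */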
3268
3269 void
3270 gfc_conv_array_ref (gfc_se * se, gfc_array_ref * ar, gfc_expr *expr,
3271 locus * where)
3272 {
3273 int n;
3274 tree offset, cst_offset;
3275 tree tmp;
3276 tree stride;
3277 gfc_se indexse;
3278 gfc_se tmpse;
3279 gfc_symbol * sym = expr->symtree->n.sym;
3280 char *var_name = NULL;
3281
3282 if (ar->dimen == 0)
3283 {
3284 gcc_assert (ar->codimen);
3285
3286 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (se->expr)))
3287 se->expr = build_fold_indirect_ref (gfc_conv_array_data (se->expr));
3288 else
3289 {
3290 if (GFC_ARRAY_TYPE_P (TREE_TYPE (se->expr))
3291 && TREE_CODE (TREE_TYPE (se->expr)) == POINTER_TYPE)
3292 se->expr = build_fold_indirect_ref_loc (input_location, se->expr);
3293
3294 /* Use the actual tree type and not the wrapped coarray. */
3295 if (!se->want_pointer)
3296 se->expr = fold_convert (TYPE_MAIN_VARIANT (TREE_TYPE (se->expr)),
3297 se->expr);
3298 }
3299
3300 return;
3301 }
3302
3303 /* Handle scalarized references separately. */
3304 if (ar->type != AR_ELEMENT)
3305 {
3306 gfc_conv_scalarized_array_ref (se, ar);
3307 gfc_advance_se_ss_chain (se);
3308 return;
3309 }
3310
3311 if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
3312 {
3313 size_t len;
3314 gfc_ref *ref;
3315
3316 len = strlen (sym->name) + 1;
3317 for (ref = expr->ref; ref; ref = ref->next)
3318 {
3319 if (ref->type == REF_ARRAY && &ref->u.ar == ar)
3320 break;
3321 if (ref->type == REF_COMPONENT)
3322 len += 1 + strlen (ref->u.c.component->name);
3323 }
3324
3325 var_name = XALLOCAVEC (char, len);
3326 strcpy (var_name, sym->name);
3327
3328 for (ref = expr->ref; ref; ref = ref->next)
3329 {
3330 if (ref->type == REF_ARRAY && &ref->u.ar == ar)
3331 break;
3332 if (ref->type == REF_COMPONENT)
3333 {
3334 strcat (var_name, "%%");
3335 strcat (var_name, ref->u.c.component->name);
3336 }
3337 }
3338 }
3339
3340 cst_offset = offset = gfc_index_zero_node;
3341 add_to_offset (&cst_offset, &offset, gfc_conv_array_offset (se->expr));
3342
3343 /* Calculate the offsets from all the dimensions. Make sure to associate
3344 the final offset so that we form a chain of loop invariant summands. */
3345 for (n = ar->dimen - 1; n >= 0; n--)
3346 {
3347 /* Calculate the index for this dimension. */
3348 gfc_init_se (&indexse, se);
3349 gfc_conv_expr_type (&indexse, ar->start[n], gfc_array_index_type);
3350 gfc_add_block_to_block (&se->pre, &indexse.pre);
3351
3352 if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
3353 {
3354 /* Check array bounds. */
3355 tree cond;
3356 char *msg;
3357
3358 /* Evaluate the indexse.expr only once. */
3359 indexse.expr = save_expr (indexse.expr);
3360
3361 /* Lower bound. */
3362 tmp = gfc_conv_array_lbound (se->expr, n);
3363 if (sym->attr.temporary)
3364 {
3365 gfc_init_se (&tmpse, se);
3366 gfc_conv_expr_type (&tmpse, ar->as->lower[n],
3367 gfc_array_index_type);
3368 gfc_add_block_to_block (&se->pre, &tmpse.pre);
3369 tmp = tmpse.expr;
3370 }
3371
3372 cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
3373 indexse.expr, tmp);
3374 msg = xasprintf ("Index '%%ld' of dimension %d of array '%s' "
3375 "below lower bound of %%ld", n+1, var_name);
3376 gfc_trans_runtime_check (true, false, cond, &se->pre, where, msg,
3377 fold_convert (long_integer_type_node,
3378 indexse.expr),
3379 fold_convert (long_integer_type_node, tmp));
3380 free (msg);
3381
3382 /* Upper bound, but not for the last dimension of assumed-size
3383 arrays. */
3384 if (n < ar->dimen - 1 || ar->as->type != AS_ASSUMED_SIZE)
3385 {
3386 tmp = gfc_conv_array_ubound (se->expr, n);
3387 if (sym->attr.temporary)
3388 {
3389 gfc_init_se (&tmpse, se);
3390 gfc_conv_expr_type (&tmpse, ar->as->upper[n],
3391 gfc_array_index_type);
3392 gfc_add_block_to_block (&se->pre, &tmpse.pre);
3393 tmp = tmpse.expr;
3394 }
3395
3396 cond = fold_build2_loc (input_location, GT_EXPR,
3397 boolean_type_node, indexse.expr, tmp);
3398 msg = xasprintf ("Index '%%ld' of dimension %d of array '%s' "
3399 "above upper bound of %%ld", n+1, var_name);
3400 gfc_trans_runtime_check (true, false, cond, &se->pre, where, msg,
3401 fold_convert (long_integer_type_node,
3402 indexse.expr),
3403 fold_convert (long_integer_type_node, tmp));
3404 free (msg);
3405 }
3406 }
3407
3408 /* Multiply the index by the stride. */
3409 stride = gfc_conv_array_stride (se->expr, n);
3410 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
3411 indexse.expr, stride);
3412
3413 /* And add it to the total. */
3414 add_to_offset (&cst_offset, &offset, tmp);
3415 }
3416
3417 if (!integer_zerop (cst_offset))
3418 offset = fold_build2_loc (input_location, PLUS_EXPR,
3419 gfc_array_index_type, offset, cst_offset);
3420
3421 se->expr = build_array_ref (se->expr, offset, sym->ts.type == BT_CLASS ?
3422 NULL_TREE : sym->backend_decl, se->class_vptr);
3423 }
3424
3425
3426 /* Add the offset corresponding to array's ARRAY_DIM dimension and loop's
3427 LOOP_DIM dimension (if any) to array's offset. */
3428
3429 static void
3430 add_array_offset (stmtblock_t *pblock, gfc_loopinfo *loop, gfc_ss *ss,
3431 gfc_array_ref *ar, int array_dim, int loop_dim)
3432 {
3433 gfc_se se;
3434 gfc_array_info *info;
3435 tree stride, index;
3436
3437 info = &ss->info->data.array;
3438
3439 gfc_init_se (&se, NULL);
3440 se.loop = loop;
3441 se.expr = info->descriptor;
3442 stride = gfc_conv_array_stride (info->descriptor, array_dim);
3443 index = conv_array_index_offset (&se, ss, array_dim, loop_dim, ar, stride);
3444 gfc_add_block_to_block (pblock, &se.pre);
3445
3446 info->offset = fold_build2_loc (input_location, PLUS_EXPR,
3447 gfc_array_index_type,
3448 info->offset, index);
3449 info->offset = gfc_evaluate_now (info->offset, pblock);
3450 }
3451
3452
3453 /* Generate the code to be executed immediately before entering a
3454 scalarization loop. */
3455
3456 static void
3457 gfc_trans_preloop_setup (gfc_loopinfo * loop, int dim, int flag,
3458 stmtblock_t * pblock)
3459 {
3460 tree stride;
3461 gfc_ss_info *ss_info;
3462 gfc_array_info *info;
3463 gfc_ss_type ss_type;
3464 gfc_ss *ss, *pss;
3465 gfc_loopinfo *ploop;
3466 gfc_array_ref *ar;
3467 int i;
3468
3469 /* This code will be executed before entering the scalarization loop
3470 for this dimension. */
3471 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3472 {
3473 ss_info = ss->info;
3474
3475 if ((ss_info->useflags & flag) == 0)
3476 continue;
3477
3478 ss_type = ss_info->type;
3479 if (ss_type != GFC_SS_SECTION
3480 && ss_type != GFC_SS_FUNCTION
3481 && ss_type != GFC_SS_CONSTRUCTOR
3482 && ss_type != GFC_SS_COMPONENT)
3483 continue;
3484
3485 info = &ss_info->data.array;
3486
3487 gcc_assert (dim < ss->dimen);
3488 gcc_assert (ss->dimen == loop->dimen);
3489
3490 if (info->ref)
3491 ar = &info->ref->u.ar;
3492 else
3493 ar = NULL;
3494
3495 if (dim == loop->dimen - 1 && loop->parent != NULL)
3496 {
3497 /* If we are in the outermost dimension of this loop, the previous
3498 dimension shall be in the parent loop. */
3499 gcc_assert (ss->parent != NULL);
3500
3501 pss = ss->parent;
3502 ploop = loop->parent;
3503
3504 /* ss and ss->parent are about the same array. */
3505 gcc_assert (ss_info == pss->info);
3506 }
3507 else
3508 {
3509 ploop = loop;
3510 pss = ss;
3511 }
3512
3513 if (dim == loop->dimen - 1)
3514 i = 0;
3515 else
3516 i = dim + 1;
3517
3518 /* For the time being, there is no loop reordering. */
3519 gcc_assert (i == ploop->order[i]);
3520 i = ploop->order[i];
3521
3522 if (dim == loop->dimen - 1 && loop->parent == NULL)
3523 {
3524 stride = gfc_conv_array_stride (info->descriptor,
3525 innermost_ss (ss)->dim[i]);
3526
3527 /* Calculate the stride of the innermost loop. Hopefully this will
3528 allow the backend optimizers to do their stuff more effectively.
3529 */
3530 info->stride0 = gfc_evaluate_now (stride, pblock);
3531
3532 /* For the outermost loop calculate the offset due to any
3533 elemental dimensions. It will have been initialized with the
3534 base offset of the array. */
3535 if (info->ref)
3536 {
3537 for (i = 0; i < ar->dimen; i++)
3538 {
3539 if (ar->dimen_type[i] != DIMEN_ELEMENT)
3540 continue;
3541
3542 add_array_offset (pblock, loop, ss, ar, i, /* unused */ -1);
3543 }
3544 }
3545 }
3546 else
3547 /* Add the offset for the previous loop dimension. */
3548 add_array_offset (pblock, ploop, ss, ar, pss->dim[i], i);
3549
3550 /* Remember this offset for the second loop. */
3551 if (dim == loop->temp_dim - 1 && loop->parent == NULL)
3552 info->saved_offset = info->offset;
3553 }
3554 }
3555
3556
3557 /* Start a scalarized expression. Creates a scope and declares loop
3558 variables. */
3559
3560 void
3561 gfc_start_scalarized_body (gfc_loopinfo * loop, stmtblock_t * pbody)
3562 {
3563 int dim;
3564 int n;
3565 int flags;
3566
3567 gcc_assert (!loop->array_parameter);
3568
3569 for (dim = loop->dimen - 1; dim >= 0; dim--)
3570 {
3571 n = loop->order[dim];
3572
3573 gfc_start_block (&loop->code[n]);
3574
3575 /* Create the loop variable. */
3576 loop->loopvar[n] = gfc_create_var (gfc_array_index_type, "S");
3577
3578 if (dim < loop->temp_dim)
3579 flags = 3;
3580 else
3581 flags = 1;
3582 /* Calculate values that will be constant within this loop. */
3583 gfc_trans_preloop_setup (loop, dim, flags, &loop->code[n]);
3584 }
3585 gfc_start_block (pbody);
3586 }
3587
3588
3589 /* Generates the actual loop code for a scalarization loop. */
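/* In the non-workshare case below the emitted structure is roughly

       S = from;
     loop:
       if (S > to) goto exit;      (S < to when the loop is reversed)
       <body>
       S = S + 1;                  (S - 1 when reversed)
       goto loop;
     exit:

   while the workshare case wraps the body in an OMP_FOR instead.  */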
3590
3591 void
3592 gfc_trans_scalarized_loop_end (gfc_loopinfo * loop, int n,
3593 stmtblock_t * pbody)
3594 {
3595 stmtblock_t block;
3596 tree cond;
3597 tree tmp;
3598 tree loopbody;
3599 tree exit_label;
3600 tree stmt;
3601 tree init;
3602 tree incr;
3603
3604 if ((ompws_flags & (OMPWS_WORKSHARE_FLAG | OMPWS_SCALARIZER_WS))
3605 == (OMPWS_WORKSHARE_FLAG | OMPWS_SCALARIZER_WS)
3606 && n == loop->dimen - 1)
3607 {
3608 /* We create an OMP_FOR construct for the outermost scalarized loop. */
3609 init = make_tree_vec (1);
3610 cond = make_tree_vec (1);
3611 incr = make_tree_vec (1);
3612
3613 /* Cycle statement is implemented with a goto. Exit statement must not
3614 be present for this loop. */
3615 exit_label = gfc_build_label_decl (NULL_TREE);
3616 TREE_USED (exit_label) = 1;
3617
3618 /* Label for cycle statements (if needed). */
3619 tmp = build1_v (LABEL_EXPR, exit_label);
3620 gfc_add_expr_to_block (pbody, tmp);
3621
3622 stmt = make_node (OMP_FOR);
3623
3624 TREE_TYPE (stmt) = void_type_node;
3625 OMP_FOR_BODY (stmt) = loopbody = gfc_finish_block (pbody);
3626
3627 OMP_FOR_CLAUSES (stmt) = build_omp_clause (input_location,
3628 OMP_CLAUSE_SCHEDULE);
3629 OMP_CLAUSE_SCHEDULE_KIND (OMP_FOR_CLAUSES (stmt))
3630 = OMP_CLAUSE_SCHEDULE_STATIC;
3631 if (ompws_flags & OMPWS_NOWAIT)
3632 OMP_CLAUSE_CHAIN (OMP_FOR_CLAUSES (stmt))
3633 = build_omp_clause (input_location, OMP_CLAUSE_NOWAIT);
3634
3635 /* Initialize the loopvar. */
3636 TREE_VEC_ELT (init, 0) = build2_v (MODIFY_EXPR, loop->loopvar[n],
3637 loop->from[n]);
3638 OMP_FOR_INIT (stmt) = init;
3639 /* The exit condition. */
3640 TREE_VEC_ELT (cond, 0) = build2_loc (input_location, LE_EXPR,
3641 boolean_type_node,
3642 loop->loopvar[n], loop->to[n]);
3643 SET_EXPR_LOCATION (TREE_VEC_ELT (cond, 0), input_location);
3644 OMP_FOR_COND (stmt) = cond;
3645 /* Increment the loopvar. */
3646 tmp = build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
3647 loop->loopvar[n], gfc_index_one_node);
3648 TREE_VEC_ELT (incr, 0) = fold_build2_loc (input_location, MODIFY_EXPR,
3649 void_type_node, loop->loopvar[n], tmp);
3650 OMP_FOR_INCR (stmt) = incr;
3651
3652 ompws_flags &= ~OMPWS_CURR_SINGLEUNIT;
3653 gfc_add_expr_to_block (&loop->code[n], stmt);
3654 }
3655 else
3656 {
3657 bool reverse_loop = (loop->reverse[n] == GFC_REVERSE_SET)
3658 && (loop->temp_ss == NULL);
3659
3660 loopbody = gfc_finish_block (pbody);
3661
3662 if (reverse_loop)
3663 std::swap (loop->from[n], loop->to[n]);
3664
3665 /* Initialize the loopvar. */
3666 if (loop->loopvar[n] != loop->from[n])
3667 gfc_add_modify (&loop->code[n], loop->loopvar[n], loop->from[n]);
3668
3669 exit_label = gfc_build_label_decl (NULL_TREE);
3670
3671 /* Generate the loop body. */
3672 gfc_init_block (&block);
3673
3674 /* The exit condition. */
3675 cond = fold_build2_loc (input_location, reverse_loop ? LT_EXPR : GT_EXPR,
3676 boolean_type_node, loop->loopvar[n], loop->to[n]);
3677 tmp = build1_v (GOTO_EXPR, exit_label);
3678 TREE_USED (exit_label) = 1;
3679 tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location));
3680 gfc_add_expr_to_block (&block, tmp);
3681
3682 /* The main body. */
3683 gfc_add_expr_to_block (&block, loopbody);
3684
3685 /* Increment the loopvar. */
3686 tmp = fold_build2_loc (input_location,
3687 reverse_loop ? MINUS_EXPR : PLUS_EXPR,
3688 gfc_array_index_type, loop->loopvar[n],
3689 gfc_index_one_node);
3690
3691 gfc_add_modify (&block, loop->loopvar[n], tmp);
3692
3693 /* Build the loop. */
3694 tmp = gfc_finish_block (&block);
3695 tmp = build1_v (LOOP_EXPR, tmp);
3696 gfc_add_expr_to_block (&loop->code[n], tmp);
3697
3698 /* Add the exit label. */
3699 tmp = build1_v (LABEL_EXPR, exit_label);
3700 gfc_add_expr_to_block (&loop->code[n], tmp);
3701 }
3702
3703 }
3704
3705
3706 /* Finishes and generates the loops for a scalarized expression. */
3707
3708 void
3709 gfc_trans_scalarizing_loops (gfc_loopinfo * loop, stmtblock_t * body)
3710 {
3711 int dim;
3712 int n;
3713 gfc_ss *ss;
3714 stmtblock_t *pblock;
3715 tree tmp;
3716
3717 pblock = body;
3718 /* Generate the loops. */
3719 for (dim = 0; dim < loop->dimen; dim++)
3720 {
3721 n = loop->order[dim];
3722 gfc_trans_scalarized_loop_end (loop, n, pblock);
3723 loop->loopvar[n] = NULL_TREE;
3724 pblock = &loop->code[n];
3725 }
3726
3727 tmp = gfc_finish_block (pblock);
3728 gfc_add_expr_to_block (&loop->pre, tmp);
3729
3730 /* Clear all the used flags. */
3731 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3732 if (ss->parent == NULL)
3733 ss->info->useflags = 0;
3734 }
3735
3736
3737 /* Finish the main body of a scalarized expression, and start the secondary
3738 copying body. */
3739
3740 void
3741 gfc_trans_scalarized_loop_boundary (gfc_loopinfo * loop, stmtblock_t * body)
3742 {
3743 int dim;
3744 int n;
3745 stmtblock_t *pblock;
3746 gfc_ss *ss;
3747
3748 pblock = body;
3749 /* We finish as many loops as are used by the temporary. */
3750 for (dim = 0; dim < loop->temp_dim - 1; dim++)
3751 {
3752 n = loop->order[dim];
3753 gfc_trans_scalarized_loop_end (loop, n, pblock);
3754 loop->loopvar[n] = NULL_TREE;
3755 pblock = &loop->code[n];
3756 }
3757
3758 /* We don't want to finish the outermost loop entirely. */
3759 n = loop->order[loop->temp_dim - 1];
3760 gfc_trans_scalarized_loop_end (loop, n, pblock);
3761
3762 /* Restore the initial offsets. */
3763 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3764 {
3765 gfc_ss_type ss_type;
3766 gfc_ss_info *ss_info;
3767
3768 ss_info = ss->info;
3769
3770 if ((ss_info->useflags & 2) == 0)
3771 continue;
3772
3773 ss_type = ss_info->type;
3774 if (ss_type != GFC_SS_SECTION
3775 && ss_type != GFC_SS_FUNCTION
3776 && ss_type != GFC_SS_CONSTRUCTOR
3777 && ss_type != GFC_SS_COMPONENT)
3778 continue;
3779
3780 ss_info->data.array.offset = ss_info->data.array.saved_offset;
3781 }
3782
3783 /* Restart all the inner loops we just finished. */
3784 for (dim = loop->temp_dim - 2; dim >= 0; dim--)
3785 {
3786 n = loop->order[dim];
3787
3788 gfc_start_block (&loop->code[n]);
3789
3790 loop->loopvar[n] = gfc_create_var (gfc_array_index_type, "Q");
3791
3792 gfc_trans_preloop_setup (loop, dim, 2, &loop->code[n]);
3793 }
3794
3795 /* Start a block for the secondary copying code. */
3796 gfc_start_block (body);
3797 }
3798
3799
3800 /* Precalculate (either lower or upper) bound of an array section.
3801 BLOCK: Block in which the (pre)calculation code will go.
3802 BOUNDS[DIM]: Where the bound value will be stored once evaluated.
3803 VALUES[DIM]: Specified bound (NULL <=> unspecified).
3804 DESC: Array descriptor from which the bound will be picked if unspecified
3805 (either lower or upper bound according to LBOUND). */
3806
3807 static void
3808 evaluate_bound (stmtblock_t *block, tree *bounds, gfc_expr ** values,
3809 tree desc, int dim, bool lbound, bool deferred)
3810 {
3811 gfc_se se;
3812 gfc_expr * input_val = values[dim];
3813 tree *output = &bounds[dim];
3814
3815
3816 if (input_val)
3817 {
3818 /* Specified section bound. */
3819 gfc_init_se (&se, NULL);
3820 gfc_conv_expr_type (&se, input_val, gfc_array_index_type);
3821 gfc_add_block_to_block (block, &se.pre);
3822 *output = se.expr;
3823 }
3824 else if (deferred)
3825 {
3826 /* The gfc_conv_array_lbound () routine returns a constant zero for
3827 deferred length arrays, which wreaks havoc in the scalarizer when
3828 copying to a (newly allocated) one-based array.
3829 Return the actual descriptor bound so that both bounds stay in sync. */
3830 *output = lbound ? gfc_conv_descriptor_lbound_get (desc,
3831 gfc_rank_cst[dim]):
3832 gfc_conv_descriptor_ubound_get (desc,
3833 gfc_rank_cst[dim]);
3834 }
3835 else
3836 {
3837 /* No specific bound specified so use the bound of the array. */
3838 *output = lbound ? gfc_conv_array_lbound (desc, dim) :
3839 gfc_conv_array_ubound (desc, dim);
3840 }
3841 *output = gfc_evaluate_now (*output, block);
3842 }
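/* Illustrative sketch of the choice made by evaluate_bound above:

     bound = section gives an explicit value (e.g. the 2 in A(2:, :))
           ? evaluate that expression
           : deferred shape
             ? read descriptor.dim[dim].{lbound,ubound} directly
             : gfc_conv_array_{l,u}bound (desc, dim);

   so for A(2:, :) dimension 0 uses the explicit 2, while dimension 1
   falls back to the array's own bound. */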
3843
3844
3845 /* Calculate the start, end and stride of one dimension of an array section. */
3846
3847 static void
3848 gfc_conv_section_startstride (stmtblock_t * block, gfc_ss * ss, int dim)
3849 {
3850 gfc_expr *stride = NULL;
3851 tree desc;
3852 gfc_se se;
3853 gfc_array_info *info;
3854 gfc_array_ref *ar;
3855
3856 gcc_assert (ss->info->type == GFC_SS_SECTION);
3857
3858 info = &ss->info->data.array;
3859 ar = &info->ref->u.ar;
3860
3861 if (ar->dimen_type[dim] == DIMEN_VECTOR)
3862 {
3863 /* We use a zero-based index to access the vector. */
3864 info->start[dim] = gfc_index_zero_node;
3865 info->end[dim] = NULL;
3866 info->stride[dim] = gfc_index_one_node;
3867 return;
3868 }
3869
3870 gcc_assert (ar->dimen_type[dim] == DIMEN_RANGE
3871 || ar->dimen_type[dim] == DIMEN_THIS_IMAGE);
3872 desc = info->descriptor;
3873 stride = ar->stride[dim];
3874
3875
3876 /* Calculate the start of the range. For vector subscripts this will
3877 be the range of the vector. */
3878 evaluate_bound (block, info->start, ar->start, desc, dim, true,
3879 ar->as->type == AS_DEFERRED);
3880
3881 /* Similarly calculate the end. Although this is not used in the
3882 scalarizer, it is needed when checking bounds and where the end
3883 is an expression with side-effects. */
3884 evaluate_bound (block, info->end, ar->end, desc, dim, false,
3885 ar->as->type == AS_DEFERRED);
3886
3887
3888 /* Calculate the stride. */
3889 if (stride == NULL)
3890 info->stride[dim] = gfc_index_one_node;
3891 else
3892 {
3893 gfc_init_se (&se, NULL);
3894 gfc_conv_expr_type (&se, stride, gfc_array_index_type);
3895 gfc_add_block_to_block (block, &se.pre);
3896 info->stride[dim] = gfc_evaluate_now (se.expr, block);
3897 }
3898 }
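/* Example (sketch): for the section A(3:11:2) this records
   start = 3, end = 11 and stride = 2 in info->{start,end,stride}[dim];
   for a vector subscript A(v) it records start = 0, end = NULL and
   stride = 1, i.e. a zero-based walk over the subscript vector v. */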
3899
3900
3901 /* Calculates the range start and stride for a SS chain. Also gets the
3902 descriptor and data pointer. The range of vector subscripts is the size
3903 of the vector. Array bounds are also checked. */
3904
3905 void
3906 gfc_conv_ss_startstride (gfc_loopinfo * loop)
3907 {
3908 int n;
3909 tree tmp;
3910 gfc_ss *ss;
3911 tree desc;
3912
3913 gfc_loopinfo * const outer_loop = outermost_loop (loop);
3914
3915 loop->dimen = 0;
3916 /* Determine the rank of the loop. */
3917 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3918 {
3919 switch (ss->info->type)
3920 {
3921 case GFC_SS_SECTION:
3922 case GFC_SS_CONSTRUCTOR:
3923 case GFC_SS_FUNCTION:
3924 case GFC_SS_COMPONENT:
3925 loop->dimen = ss->dimen;
3926 goto done;
3927
3928 /* As usual, lbound and ubound are exceptions! */
3929 case GFC_SS_INTRINSIC:
3930 switch (ss->info->expr->value.function.isym->id)
3931 {
3932 case GFC_ISYM_LBOUND:
3933 case GFC_ISYM_UBOUND:
3934 case GFC_ISYM_LCOBOUND:
3935 case GFC_ISYM_UCOBOUND:
3936 case GFC_ISYM_THIS_IMAGE:
3937 loop->dimen = ss->dimen;
3938 goto done;
3939
3940 default:
3941 break;
3942 }
3943
3944 default:
3945 break;
3946 }
3947 }
3948
3949 /* We should have determined the rank of the expression by now. If
3950 not, that's bad news. */
3951 gcc_unreachable ();
3952
3953 done:
3954 /* Loop over all the SS in the chain. */
3955 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3956 {
3957 gfc_ss_info *ss_info;
3958 gfc_array_info *info;
3959 gfc_expr *expr;
3960
3961 ss_info = ss->info;
3962 expr = ss_info->expr;
3963 info = &ss_info->data.array;
3964
3965 if (expr && expr->shape && !info->shape)
3966 info->shape = expr->shape;
3967
3968 switch (ss_info->type)
3969 {
3970 case GFC_SS_SECTION:
3971 /* Get the descriptor for the array. If it is a cross loops array,
3972 we got the descriptor already in the outermost loop. */
3973 if (ss->parent == NULL)
3974 gfc_conv_ss_descriptor (&outer_loop->pre, ss,
3975 !loop->array_parameter);
3976
3977 for (n = 0; n < ss->dimen; n++)
3978 gfc_conv_section_startstride (&outer_loop->pre, ss, ss->dim[n]);
3979 break;
3980
3981 case GFC_SS_INTRINSIC:
3982 switch (expr->value.function.isym->id)
3983 {
3984 /* Fall through to supply start and stride. */
3985 case GFC_ISYM_LBOUND:
3986 case GFC_ISYM_UBOUND:
3987 {
3988 gfc_expr *arg;
3989
3990 /* This is the variant without DIM=... */
3991 gcc_assert (expr->value.function.actual->next->expr == NULL);
3992
3993 arg = expr->value.function.actual->expr;
3994 if (arg->rank == -1)
3995 {
3996 gfc_se se;
3997 tree rank, tmp;
3998
3999 /* The rank (hence the return value's shape) is unknown,
4000 we have to retrieve it. */
4001 gfc_init_se (&se, NULL);
4002 se.descriptor_only = 1;
4003 gfc_conv_expr (&se, arg);
4004 /* This is a bare variable, so there is no preliminary
4005 or cleanup code. */
4006 gcc_assert (se.pre.head == NULL_TREE
4007 && se.post.head == NULL_TREE);
4008 rank = gfc_conv_descriptor_rank (se.expr);
4009 tmp = fold_build2_loc (input_location, MINUS_EXPR,
4010 gfc_array_index_type,
4011 fold_convert (gfc_array_index_type,
4012 rank),
4013 gfc_index_one_node);
4014 info->end[0] = gfc_evaluate_now (tmp, &outer_loop->pre);
4015 info->start[0] = gfc_index_zero_node;
4016 info->stride[0] = gfc_index_one_node;
4017 continue;
4018 }
4019 /* Otherwise fall through to GFC_SS_FUNCTION. */
4020 }
4021 case GFC_ISYM_LCOBOUND:
4022 case GFC_ISYM_UCOBOUND:
4023 case GFC_ISYM_THIS_IMAGE:
4024 break;
4025
4026 default:
4027 continue;
4028 }
4029
4030 case GFC_SS_CONSTRUCTOR:
4031 case GFC_SS_FUNCTION:
4032 for (n = 0; n < ss->dimen; n++)
4033 {
4034 int dim = ss->dim[n];
4035
4036 info->start[dim] = gfc_index_zero_node;
4037 info->end[dim] = gfc_index_zero_node;
4038 info->stride[dim] = gfc_index_one_node;
4039 }
4040 break;
4041
4042 default:
4043 break;
4044 }
4045 }
4046
4047 /* The rest is just runtime bound checking. */
4048 if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
4049 {
4050 stmtblock_t block;
4051 tree lbound, ubound;
4052 tree end;
4053 tree size[GFC_MAX_DIMENSIONS];
4054 tree stride_pos, stride_neg, non_zerosized, tmp2, tmp3;
4055 gfc_array_info *info;
4056 char *msg;
4057 int dim;
4058
4059 gfc_start_block (&block);
4060
4061 for (n = 0; n < loop->dimen; n++)
4062 size[n] = NULL_TREE;
4063
4064 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
4065 {
4066 stmtblock_t inner;
4067 gfc_ss_info *ss_info;
4068 gfc_expr *expr;
4069 locus *expr_loc;
4070 const char *expr_name;
4071
4072 ss_info = ss->info;
4073 if (ss_info->type != GFC_SS_SECTION)
4074 continue;
4075
4076 /* Catch an allocatable lhs in F2003. */
4077 if (flag_realloc_lhs && ss->is_alloc_lhs)
4078 continue;
4079
4080 expr = ss_info->expr;
4081 expr_loc = &expr->where;
4082 expr_name = expr->symtree->name;
4083
4084 gfc_start_block (&inner);
4085
4086 /* TODO: range checking for mapped dimensions. */
4087 info = &ss_info->data.array;
4088
4089 /* This code only checks ranges. Elemental and vector
4090 dimensions are checked later. */
4091 for (n = 0; n < loop->dimen; n++)
4092 {
4093 bool check_upper;
4094
4095 dim = ss->dim[n];
4096 if (info->ref->u.ar.dimen_type[dim] != DIMEN_RANGE)
4097 continue;
4098
4099 if (dim == info->ref->u.ar.dimen - 1
4100 && info->ref->u.ar.as->type == AS_ASSUMED_SIZE)
4101 check_upper = false;
4102 else
4103 check_upper = true;
4104
4105 /* Zero stride is not allowed. */
4106 tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
4107 info->stride[dim], gfc_index_zero_node);
4108 msg = xasprintf ("Zero stride is not allowed, for dimension %d "
4109 "of array '%s'", dim + 1, expr_name);
4110 gfc_trans_runtime_check (true, false, tmp, &inner,
4111 expr_loc, msg);
4112 free (msg);
4113
4114 desc = info->descriptor;
4115
4116 /* This is the run-time equivalent of resolve.c's
4117 check_dimension(). The logic is more readable there
4118 than it is here, with all the trees. */
4119 lbound = gfc_conv_array_lbound (desc, dim);
4120 end = info->end[dim];
4121 if (check_upper)
4122 ubound = gfc_conv_array_ubound (desc, dim);
4123 else
4124 ubound = NULL;
4125
4126 /* non_zerosized is true when the selected range is not
4127 empty. */
4128 stride_pos = fold_build2_loc (input_location, GT_EXPR,
4129 boolean_type_node, info->stride[dim],
4130 gfc_index_zero_node);
4131 tmp = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
4132 info->start[dim], end);
4133 stride_pos = fold_build2_loc (input_location, TRUTH_AND_EXPR,
4134 boolean_type_node, stride_pos, tmp);
4135
4136 stride_neg = fold_build2_loc (input_location, LT_EXPR,
4137 boolean_type_node,
4138 info->stride[dim], gfc_index_zero_node);
4139 tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
4140 info->start[dim], end);
4141 stride_neg = fold_build2_loc (input_location, TRUTH_AND_EXPR,
4142 boolean_type_node,
4143 stride_neg, tmp);
4144 non_zerosized = fold_build2_loc (input_location, TRUTH_OR_EXPR,
4145 boolean_type_node,
4146 stride_pos, stride_neg);
4147
4148 /* Check the start of the range against the lower and upper
4149 bounds of the array, if the range is not empty.
4150 If upper bound is present, include both bounds in the
4151 error message. */
4152 if (check_upper)
4153 {
4154 tmp = fold_build2_loc (input_location, LT_EXPR,
4155 boolean_type_node,
4156 info->start[dim], lbound);
4157 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
4158 boolean_type_node,
4159 non_zerosized, tmp);
4160 tmp2 = fold_build2_loc (input_location, GT_EXPR,
4161 boolean_type_node,
4162 info->start[dim], ubound);
4163 tmp2 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
4164 boolean_type_node,
4165 non_zerosized, tmp2);
4166 msg = xasprintf ("Index '%%ld' of dimension %d of array '%s' "
4167 "outside of expected range (%%ld:%%ld)",
4168 dim + 1, expr_name);
4169 gfc_trans_runtime_check (true, false, tmp, &inner,
4170 expr_loc, msg,
4171 fold_convert (long_integer_type_node, info->start[dim]),
4172 fold_convert (long_integer_type_node, lbound),
4173 fold_convert (long_integer_type_node, ubound));
4174 gfc_trans_runtime_check (true, false, tmp2, &inner,
4175 expr_loc, msg,
4176 fold_convert (long_integer_type_node, info->start[dim]),
4177 fold_convert (long_integer_type_node, lbound),
4178 fold_convert (long_integer_type_node, ubound));
4179 free (msg);
4180 }
4181 else
4182 {
4183 tmp = fold_build2_loc (input_location, LT_EXPR,
4184 boolean_type_node,
4185 info->start[dim], lbound);
4186 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
4187 boolean_type_node, non_zerosized, tmp);
4188 msg = xasprintf ("Index '%%ld' of dimension %d of array '%s' "
4189 "below lower bound of %%ld",
4190 dim + 1, expr_name);
4191 gfc_trans_runtime_check (true, false, tmp, &inner,
4192 expr_loc, msg,
4193 fold_convert (long_integer_type_node, info->start[dim]),
4194 fold_convert (long_integer_type_node, lbound));
4195 free (msg);
4196 }
4197
4198 /* Compute the last element of the range, which is not
4199 necessarily "end" (think 0:5:3, which doesn't contain 5)
4200 and check it against both lower and upper bounds. */
4201
4202 tmp = fold_build2_loc (input_location, MINUS_EXPR,
4203 gfc_array_index_type, end,
4204 info->start[dim]);
4205 tmp = fold_build2_loc (input_location, TRUNC_MOD_EXPR,
4206 gfc_array_index_type, tmp,
4207 info->stride[dim]);
4208 tmp = fold_build2_loc (input_location, MINUS_EXPR,
4209 gfc_array_index_type, end, tmp);
4210 tmp2 = fold_build2_loc (input_location, LT_EXPR,
4211 boolean_type_node, tmp, lbound);
4212 tmp2 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
4213 boolean_type_node, non_zerosized, tmp2);
4214 if (check_upper)
4215 {
4216 tmp3 = fold_build2_loc (input_location, GT_EXPR,
4217 boolean_type_node, tmp, ubound);
4218 tmp3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
4219 boolean_type_node, non_zerosized, tmp3);
4220 msg = xasprintf ("Index '%%ld' of dimension %d of array '%s' "
4221 "outside of expected range (%%ld:%%ld)",
4222 dim + 1, expr_name);
4223 gfc_trans_runtime_check (true, false, tmp2, &inner,
4224 expr_loc, msg,
4225 fold_convert (long_integer_type_node, tmp),
4226 fold_convert (long_integer_type_node, ubound),
4227 fold_convert (long_integer_type_node, lbound));
4228 gfc_trans_runtime_check (true, false, tmp3, &inner,
4229 expr_loc, msg,
4230 fold_convert (long_integer_type_node, tmp),
4231 fold_convert (long_integer_type_node, ubound),
4232 fold_convert (long_integer_type_node, lbound));
4233 free (msg);
4234 }
4235 else
4236 {
4237 msg = xasprintf ("Index '%%ld' of dimension %d of array '%s' "
4238 "below lower bound of %%ld",
4239 dim + 1, expr_name);
4240 gfc_trans_runtime_check (true, false, tmp2, &inner,
4241 expr_loc, msg,
4242 fold_convert (long_integer_type_node, tmp),
4243 fold_convert (long_integer_type_node, lbound));
4244 free (msg);
4245 }
4246
4247 /* Check the section sizes match. */
4248 tmp = fold_build2_loc (input_location, MINUS_EXPR,
4249 gfc_array_index_type, end,
4250 info->start[dim]);
4251 tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR,
4252 gfc_array_index_type, tmp,
4253 info->stride[dim]);
4254 tmp = fold_build2_loc (input_location, PLUS_EXPR,
4255 gfc_array_index_type,
4256 gfc_index_one_node, tmp);
4257 tmp = fold_build2_loc (input_location, MAX_EXPR,
4258 gfc_array_index_type, tmp,
4259 build_int_cst (gfc_array_index_type, 0));
4260 /* We remember the size of the first section, and check all the
4261 others against this. */
4262 if (size[n])
4263 {
4264 tmp3 = fold_build2_loc (input_location, NE_EXPR,
4265 boolean_type_node, tmp, size[n]);
4266 msg = xasprintf ("Array bound mismatch for dimension %d "
4267 "of array '%s' (%%ld/%%ld)",
4268 dim + 1, expr_name);
4269
4270 gfc_trans_runtime_check (true, false, tmp3, &inner,
4271 expr_loc, msg,
4272 fold_convert (long_integer_type_node, tmp),
4273 fold_convert (long_integer_type_node, size[n]));
4274
4275 free (msg);
4276 }
4277 else
4278 size[n] = gfc_evaluate_now (tmp, &inner);
4279 }
4280
4281 tmp = gfc_finish_block (&inner);
4282
4283 /* For optional arguments, only check bounds if the argument is
4284 present. */
4285 if (expr->symtree->n.sym->attr.optional
4286 || expr->symtree->n.sym->attr.not_always_present)
4287 tmp = build3_v (COND_EXPR,
4288 gfc_conv_expr_present (expr->symtree->n.sym),
4289 tmp, build_empty_stmt (input_location));
4290
4291 gfc_add_expr_to_block (&block, tmp);
4292
4293 }
4294
4295 tmp = gfc_finish_block (&block);
4296 gfc_add_expr_to_block (&outer_loop->pre, tmp);
4297 }
4298
4299 for (loop = loop->nested; loop; loop = loop->next)
4300 gfc_conv_ss_startstride (loop);
4301 }
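/* Worked example for the bounds-checking code above (sketch): for the
   section A(0:5:3) the last referenced element is
   end - (end - start) % stride = 5 - (5 - 0) % 3 = 3, and the section
   size is max ((end - start) / stride + 1, 0) = max (5/3 + 1, 0) = 2,
   i.e. the elements A(0) and A(3); both 0 and 3 are checked against the
   declared bounds, and the size is compared with the other sections'. */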
4302
4303 /* Return true if both symbols could refer to the same data object. Does
4304 not take account of aliasing due to equivalence statements. */
4305
4306 static int
4307 symbols_could_alias (gfc_symbol *lsym, gfc_symbol *rsym, bool lsym_pointer,
4308 bool lsym_target, bool rsym_pointer, bool rsym_target)
4309 {
4310 /* Aliasing isn't possible if the symbols have different base types. */
4311 if (gfc_compare_types (&lsym->ts, &rsym->ts) == 0)
4312 return 0;
4313
4314 /* Pointers can point to other pointers and target objects. */
4315
4316 if ((lsym_pointer && (rsym_pointer || rsym_target))
4317 || (rsym_pointer && (lsym_pointer || lsym_target)))
4318 return 1;
4319
4320 /* Special case: Argument association, cf. F90 12.4.1.6, F2003 12.4.1.7
4321 and F2008 12.5.2.13 items 3b and 4b. The pointer case (a) is already
4322 checked above. */
4323 if (lsym_target && rsym_target
4324 && ((lsym->attr.dummy && !lsym->attr.contiguous
4325 && (!lsym->attr.dimension || lsym->as->type == AS_ASSUMED_SHAPE))
4326 || (rsym->attr.dummy && !rsym->attr.contiguous
4327 && (!rsym->attr.dimension
4328 || rsym->as->type == AS_ASSUMED_SHAPE))))
4329 return 1;
4330
4331 return 0;
4332 }
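/* Illustration: given

     real, pointer :: p(:)
     real, target  :: t(10)

   symbols_could_alias (p, t, ...) returns 1, because a pointer of a
   compatible type may be associated with any target; two ordinary local
   variables of unrelated types return 0 via the type comparison at the
   top. */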
4333
4334
4335 /* Return true if the two SS could be aliased, i.e. both point to the same data
4336 object. */
4337 /* TODO: resolve aliases based on frontend expressions. */
4338
4339 static int
4340 gfc_could_be_alias (gfc_ss * lss, gfc_ss * rss)
4341 {
4342 gfc_ref *lref;
4343 gfc_ref *rref;
4344 gfc_expr *lexpr, *rexpr;
4345 gfc_symbol *lsym;
4346 gfc_symbol *rsym;
4347 bool lsym_pointer, lsym_target, rsym_pointer, rsym_target;
4348
4349 lexpr = lss->info->expr;
4350 rexpr = rss->info->expr;
4351
4352 lsym = lexpr->symtree->n.sym;
4353 rsym = rexpr->symtree->n.sym;
4354
4355 lsym_pointer = lsym->attr.pointer;
4356 lsym_target = lsym->attr.target;
4357 rsym_pointer = rsym->attr.pointer;
4358 rsym_target = rsym->attr.target;
4359
4360 if (symbols_could_alias (lsym, rsym, lsym_pointer, lsym_target,
4361 rsym_pointer, rsym_target))
4362 return 1;
4363
4364 if (rsym->ts.type != BT_DERIVED && rsym->ts.type != BT_CLASS
4365 && lsym->ts.type != BT_DERIVED && lsym->ts.type != BT_CLASS)
4366 return 0;
4367
4368 /* For derived types we must check all the component types. We can ignore
4369 array references as these will have the same base type as the previous
4370 component ref. */
4371 for (lref = lexpr->ref; lref != lss->info->data.array.ref; lref = lref->next)
4372 {
4373 if (lref->type != REF_COMPONENT)
4374 continue;
4375
4376 lsym_pointer = lsym_pointer || lref->u.c.sym->attr.pointer;
4377 lsym_target = lsym_target || lref->u.c.sym->attr.target;
4378
4379 if (symbols_could_alias (lref->u.c.sym, rsym, lsym_pointer, lsym_target,
4380 rsym_pointer, rsym_target))
4381 return 1;
4382
4383 if ((lsym_pointer && (rsym_pointer || rsym_target))
4384 || (rsym_pointer && (lsym_pointer || lsym_target)))
4385 {
4386 if (gfc_compare_types (&lref->u.c.component->ts,
4387 &rsym->ts))
4388 return 1;
4389 }
4390
4391 for (rref = rexpr->ref; rref != rss->info->data.array.ref;
4392 rref = rref->next)
4393 {
4394 if (rref->type != REF_COMPONENT)
4395 continue;
4396
4397 rsym_pointer = rsym_pointer || rref->u.c.sym->attr.pointer;
4398 rsym_target = rsym_target || rref->u.c.sym->attr.target;
4399
4400 if (symbols_could_alias (lref->u.c.sym, rref->u.c.sym,
4401 lsym_pointer, lsym_target,
4402 rsym_pointer, rsym_target))
4403 return 1;
4404
4405 if ((lsym_pointer && (rsym_pointer || rsym_target))
4406 || (rsym_pointer && (lsym_pointer || lsym_target)))
4407 {
4408 if (gfc_compare_types (&lref->u.c.component->ts,
4409 &rref->u.c.sym->ts))
4410 return 1;
4411 if (gfc_compare_types (&lref->u.c.sym->ts,
4412 &rref->u.c.component->ts))
4413 return 1;
4414 if (gfc_compare_types (&lref->u.c.component->ts,
4415 &rref->u.c.component->ts))
4416 return 1;
4417 }
4418 }
4419 }
4420
4421 lsym_pointer = lsym->attr.pointer;
4422 lsym_target = lsym->attr.target;
4423 lsym_pointer = lsym->attr.pointer;
4424 lsym_target = lsym->attr.target;
4425
4426 for (rref = rexpr->ref; rref != rss->info->data.array.ref; rref = rref->next)
4427 {
4428 if (rref->type != REF_COMPONENT)
4429 break;
4430
4431 rsym_pointer = rsym_pointer || rref->u.c.sym->attr.pointer;
4432 rsym_target = rsym_target || rref->u.c.sym->attr.target;
4433
4434 if (symbols_could_alias (rref->u.c.sym, lsym,
4435 lsym_pointer, lsym_target,
4436 rsym_pointer, rsym_target))
4437 return 1;
4438
4439 if ((lsym_pointer && (rsym_pointer || rsym_target))
4440 || (rsym_pointer && (lsym_pointer || lsym_target)))
4441 {
4442 if (gfc_compare_types (&lsym->ts, &rref->u.c.component->ts))
4443 return 1;
4444 }
4445 }
4446
4447 return 0;
4448 }
4449
4450
4451 /* Resolve array data dependencies. Creates a temporary if required. */
4452 /* TODO: Calc dependencies with gfc_expr rather than gfc_ss, and move to
4453 dependency.c. */
4454
4455 void
4456 gfc_conv_resolve_dependencies (gfc_loopinfo * loop, gfc_ss * dest,
4457 gfc_ss * rss)
4458 {
4459 gfc_ss *ss;
4460 gfc_ref *lref;
4461 gfc_ref *rref;
4462 gfc_expr *dest_expr;
4463 gfc_expr *ss_expr;
4464 int nDepend = 0;
4465 int i, j;
4466
4467 loop->temp_ss = NULL;
4468 dest_expr = dest->info->expr;
4469
4470 for (ss = rss; ss != gfc_ss_terminator; ss = ss->next)
4471 {
4472 ss_expr = ss->info->expr;
4473
4474 if (ss->info->array_outer_dependency)
4475 {
4476 nDepend = 1;
4477 break;
4478 }
4479
4480 if (ss->info->type != GFC_SS_SECTION)
4481 {
4482 if (flag_realloc_lhs
4483 && dest_expr != ss_expr
4484 && gfc_is_reallocatable_lhs (dest_expr)
4485 && ss_expr->rank)
4486 nDepend = gfc_check_dependency (dest_expr, ss_expr, true);
4487
4488 /* Check for cases like c(:)(1:2) = c(2)(2:3) */
4489 if (!nDepend && dest_expr->rank > 0
4490 && dest_expr->ts.type == BT_CHARACTER
4491 && ss_expr->expr_type == EXPR_VARIABLE)
4492
4493 nDepend = gfc_check_dependency (dest_expr, ss_expr, false);
4494
4495 continue;
4496 }
4497
4498 if (dest_expr->symtree->n.sym != ss_expr->symtree->n.sym)
4499 {
4500 if (gfc_could_be_alias (dest, ss)
4501 || gfc_are_equivalenced_arrays (dest_expr, ss_expr))
4502 {
4503 nDepend = 1;
4504 break;
4505 }
4506 }
4507 else
4508 {
4509 lref = dest_expr->ref;
4510 rref = ss_expr->ref;
4511
4512 nDepend = gfc_dep_resolver (lref, rref, &loop->reverse[0]);
4513
4514 if (nDepend == 1)
4515 break;
4516
4517 for (i = 0; i < dest->dimen; i++)
4518 for (j = 0; j < ss->dimen; j++)
4519 if (i != j
4520 && dest->dim[i] == ss->dim[j])
4521 {
4522 /* If we don't access array elements in the same order,
4523 there is a dependency. */
4524 nDepend = 1;
4525 goto temporary;
4526 }
4527 #if 0
4528 /* TODO : loop shifting. */
4529 if (nDepend == 1)
4530 {
4531 /* Mark the dimensions for LOOP SHIFTING */
4532 for (n = 0; n < loop->dimen; n++)
4533 {
4534 int dim = dest->data.info.dim[n];
4535
4536 if (lref->u.ar.dimen_type[dim] == DIMEN_VECTOR)
4537 depends[n] = 2;
4538 else if (! gfc_is_same_range (&lref->u.ar,
4539 &rref->u.ar, dim, 0))
4540 depends[n] = 1;
4541 }
4542
4543 /* Put all the dimensions with dependencies in the
4544 innermost loops. */
4545 dim = 0;
4546 for (n = 0; n < loop->dimen; n++)
4547 {
4548 gcc_assert (loop->order[n] == n);
4549 if (depends[n])
4550 loop->order[dim++] = n;
4551 }
4552 for (n = 0; n < loop->dimen; n++)
4553 {
4554 if (! depends[n])
4555 loop->order[dim++] = n;
4556 }
4557
4558 gcc_assert (dim == loop->dimen);
4559 break;
4560 }
4561 #endif
4562 }
4563 }
4564
4565 temporary:
4566
4567 if (nDepend == 1)
4568 {
4569 tree base_type = gfc_typenode_for_spec (&dest_expr->ts);
4570 if (GFC_ARRAY_TYPE_P (base_type)
4571 || GFC_DESCRIPTOR_TYPE_P (base_type))
4572 base_type = gfc_get_element_type (base_type);
4573 loop->temp_ss = gfc_get_temp_ss (base_type, dest->info->string_length,
4574 loop->dimen);
4575 gfc_add_ss_to_loop (loop, loop->temp_ss);
4576 }
4577 else
4578 loop->temp_ss = NULL;
4579 }
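/* Sketch of the net effect of the dependency resolution above:

     if (some rhs term may overlap dest in a way the scalarizer
         cannot handle by reversing a loop)
       loop->temp_ss = gfc_get_temp_ss (element type of dest,
                                        dest->info->string_length,
                                        loop->dimen);
     else
       loop->temp_ss = NULL;

   i.e. the temporary, when one is needed, always has the element type of
   the destination and spans the whole scalarization loop nest. */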
4580
4581
4582 /* Browse through each array's information from the scalarizer and set the loop
4583 bounds according to the "best" one (per dimension), i.e. the one which
4584 provides the most information (constant bounds, shape, etc.). */
4585
4586 static void
4587 set_loop_bounds (gfc_loopinfo *loop)
4588 {
4589 int n, dim, spec_dim;
4590 gfc_array_info *info;
4591 gfc_array_info *specinfo;
4592 gfc_ss *ss;
4593 tree tmp;
4594 gfc_ss **loopspec;
4595 bool dynamic[GFC_MAX_DIMENSIONS];
4596 mpz_t *cshape;
4597 mpz_t i;
4598 bool nonoptional_arr;
4599
4600 gfc_loopinfo * const outer_loop = outermost_loop (loop);
4601
4602 loopspec = loop->specloop;
4603
4604 mpz_init (i);
4605 for (n = 0; n < loop->dimen; n++)
4606 {
4607 loopspec[n] = NULL;
4608 dynamic[n] = false;
4609
4610 /* If there are both optional and nonoptional array arguments, scalarize
4611 over the nonoptional; otherwise, it does not matter as then all
4612 (optional) arrays have to be present per F2008, 12.5.2.12p3(6). */
4613
4614 nonoptional_arr = false;
4615
4616 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
4617 if (ss->info->type != GFC_SS_SCALAR && ss->info->type != GFC_SS_TEMP
4618 && ss->info->type != GFC_SS_REFERENCE && !ss->info->can_be_null_ref)
4619 {
4620 nonoptional_arr = true;
4621 break;
4622 }
4623
4624 /* We use one SS term, and use that to determine the bounds of the
4625 loop for this dimension. We try to pick the simplest term. */
4626 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
4627 {
4628 gfc_ss_type ss_type;
4629
4630 ss_type = ss->info->type;
4631 if (ss_type == GFC_SS_SCALAR
4632 || ss_type == GFC_SS_TEMP
4633 || ss_type == GFC_SS_REFERENCE
4634 || (ss->info->can_be_null_ref && nonoptional_arr))
4635 continue;
4636
4637 info = &ss->info->data.array;
4638 dim = ss->dim[n];
4639
4640 if (loopspec[n] != NULL)
4641 {
4642 specinfo = &loopspec[n]->info->data.array;
4643 spec_dim = loopspec[n]->dim[n];
4644 }
4645 else
4646 {
4647 /* Silence uninitialized warnings. */
4648 specinfo = NULL;
4649 spec_dim = 0;
4650 }
4651
4652 if (info->shape)
4653 {
4654 gcc_assert (info->shape[dim]);
4655 /* The frontend has worked out the size for us. */
4656 if (!loopspec[n]
4657 || !specinfo->shape
4658 || !integer_zerop (specinfo->start[spec_dim]))
4659 /* Prefer zero-based descriptors if possible. */
4660 loopspec[n] = ss;
4661 continue;
4662 }
4663
4664 if (ss_type == GFC_SS_CONSTRUCTOR)
4665 {
4666 gfc_constructor_base base;
4667 /* An unknown size constructor will always be rank one.
4668 Higher rank constructors will either have known shape,
4669 or still be wrapped in a call to reshape. */
4670 gcc_assert (loop->dimen == 1);
4671
4672 /* Always prefer to use the constructor bounds if the size
4673 can be determined at compile time. Prefer not to otherwise,
4674 since the general case involves realloc, and it's better to
4675 avoid that overhead if possible. */
4676 base = ss->info->expr->value.constructor;
4677 dynamic[n] = gfc_get_array_constructor_size (&i, base);
4678 if (!dynamic[n] || !loopspec[n])
4679 loopspec[n] = ss;
4680 continue;
4681 }
4682
4683 /* Avoid using an allocatable lhs in an assignment, since
4684 there might be a reallocation coming. */
4685 if (loopspec[n] && ss->is_alloc_lhs)
4686 continue;
4687
4688 if (!loopspec[n])
4689 loopspec[n] = ss;
4690 /* Criteria for choosing a loop specifier (most important first):
4691 doesn't need realloc
4692 stride of one
4693 known stride
4694 known lower bound
4695 known upper bound
4696 */
4697 else if (loopspec[n]->info->type == GFC_SS_CONSTRUCTOR && dynamic[n])
4698 loopspec[n] = ss;
4699 else if (integer_onep (info->stride[dim])
4700 && !integer_onep (specinfo->stride[spec_dim]))
4701 loopspec[n] = ss;
4702 else if (INTEGER_CST_P (info->stride[dim])
4703 && !INTEGER_CST_P (specinfo->stride[spec_dim]))
4704 loopspec[n] = ss;
4705 else if (INTEGER_CST_P (info->start[dim])
4706 && !INTEGER_CST_P (specinfo->start[spec_dim])
4707 && integer_onep (info->stride[dim])
4708 == integer_onep (specinfo->stride[spec_dim])
4709 && INTEGER_CST_P (info->stride[dim])
4710 == INTEGER_CST_P (specinfo->stride[spec_dim]))
4711 loopspec[n] = ss;
4712 /* We don't work out the upper bound.
4713 else if (INTEGER_CST_P (info->finish[n])
4714 && ! INTEGER_CST_P (specinfo->finish[n]))
4715 loopspec[n] = ss; */
4716 }
4717
4718 /* We should have found the scalarization loop specifier. If not,
4719 that's bad news. */
4720 gcc_assert (loopspec[n]);
4721
4722 info = &loopspec[n]->info->data.array;
4723 dim = loopspec[n]->dim[n];
4724
4725 /* Set the extents of this range. */
4726 cshape = info->shape;
4727 if (cshape && INTEGER_CST_P (info->start[dim])
4728 && INTEGER_CST_P (info->stride[dim]))
4729 {
4730 loop->from[n] = info->start[dim];
4731 mpz_set (i, cshape[get_array_ref_dim_for_loop_dim (loopspec[n], n)]);
4732 mpz_sub_ui (i, i, 1);
4733 /* To = from + (size - 1) * stride. */
4734 tmp = gfc_conv_mpz_to_tree (i, gfc_index_integer_kind);
4735 if (!integer_onep (info->stride[dim]))
4736 tmp = fold_build2_loc (input_location, MULT_EXPR,
4737 gfc_array_index_type, tmp,
4738 info->stride[dim]);
4739 loop->to[n] = fold_build2_loc (input_location, PLUS_EXPR,
4740 gfc_array_index_type,
4741 loop->from[n], tmp);
4742 }
4743 else
4744 {
4745 loop->from[n] = info->start[dim];
4746 switch (loopspec[n]->info->type)
4747 {
4748 case GFC_SS_CONSTRUCTOR:
4749 /* The upper bound is calculated when we expand the
4750 constructor. */
4751 gcc_assert (loop->to[n] == NULL_TREE);
4752 break;
4753
4754 case GFC_SS_SECTION:
4755 /* Use the end expression if it exists and is not constant,
4756 so that it is only evaluated once. */
4757 loop->to[n] = info->end[dim];
4758 break;
4759
4760 case GFC_SS_FUNCTION:
4761 /* The loop bound will be set when we generate the call. */
4762 gcc_assert (loop->to[n] == NULL_TREE);
4763 break;
4764
4765 case GFC_SS_INTRINSIC:
4766 {
4767 gfc_expr *expr = loopspec[n]->info->expr;
4768
4769 /* The {l,u}bound of an assumed rank. */
4770 gcc_assert ((expr->value.function.isym->id == GFC_ISYM_LBOUND
4771 || expr->value.function.isym->id == GFC_ISYM_UBOUND)
4772 && expr->value.function.actual->next->expr == NULL
4773 && expr->value.function.actual->expr->rank == -1);
4774
4775 loop->to[n] = info->end[dim];
4776 break;
4777 }
4778
4779 default:
4780 gcc_unreachable ();
4781 }
4782 }
4783
4784 /* Transform everything so we have a simple incrementing variable. */
4785 if (integer_onep (info->stride[dim]))
4786 info->delta[dim] = gfc_index_zero_node;
4787 else
4788 {
4789 /* Set the delta for this section. */
4790 info->delta[dim] = gfc_evaluate_now (loop->from[n], &outer_loop->pre);
4791 /* Number of iterations is (end - start + step) / step.
4792 with start = 0, this simplifies to
4793 last = end / step;
4794 for (i = 0; i<=last; i++){...}; */
4795 tmp = fold_build2_loc (input_location, MINUS_EXPR,
4796 gfc_array_index_type, loop->to[n],
4797 loop->from[n]);
4798 tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR,
4799 gfc_array_index_type, tmp, info->stride[dim]);
4800 tmp = fold_build2_loc (input_location, MAX_EXPR, gfc_array_index_type,
4801 tmp, build_int_cst (gfc_array_index_type, -1));
4802 loop->to[n] = gfc_evaluate_now (tmp, &outer_loop->pre);
4803 /* Make the loop variable start at 0. */
4804 loop->from[n] = gfc_index_zero_node;
4805 }
4806 }
4807 mpz_clear (i);
4808
4809 for (loop = loop->nested; loop; loop = loop->next)
4810 set_loop_bounds (loop);
4811 }
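/* Worked example (sketch): if the chosen loop specifier is the section
   A(3:11:2), then from = 3 and stride = 2, so the loop is normalized:
   delta = 3 is recorded, to becomes max ((11 - 3) / 2, -1) = 4 and from
   becomes 0.  The scalarization loop then runs loopvar = 0..4, and the
   array subscript used later is delta + loopvar * stride, i.e.
   3, 5, 7, 9, 11. */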
4812
4813
4814 /* Initialize the scalarization loop. Creates the loop variables. Determines
4815 the range of the loop variables. Creates a temporary if required.
4816 Also generates code for scalar expressions which have been
4817 moved outside the loop. */
4818
4819 void
4820 gfc_conv_loop_setup (gfc_loopinfo * loop, locus * where)
4821 {
4822 gfc_ss *tmp_ss;
4823 tree tmp;
4824
4825 set_loop_bounds (loop);
4826
4827 /* Add all the scalar code that can be taken out of the loops.
4828 This may include calculating the loop bounds, so do it before
4829 allocating the temporary. */
4830 gfc_add_loop_ss_code (loop, loop->ss, false, where);
4831
4832 tmp_ss = loop->temp_ss;
4833 /* If we want a temporary then create it. */
4834 if (tmp_ss != NULL)
4835 {
4836 gfc_ss_info *tmp_ss_info;
4837
4838 tmp_ss_info = tmp_ss->info;
4839 gcc_assert (tmp_ss_info->type == GFC_SS_TEMP);
4840 gcc_assert (loop->parent == NULL);
4841
4842 /* Make absolutely sure that this is a complete type. */
4843 if (tmp_ss_info->string_length)
4844 tmp_ss_info->data.temp.type
4845 = gfc_get_character_type_len_for_eltype
4846 (TREE_TYPE (tmp_ss_info->data.temp.type),
4847 tmp_ss_info->string_length);
4848
4849 tmp = tmp_ss_info->data.temp.type;
4850 memset (&tmp_ss_info->data.array, 0, sizeof (gfc_array_info));
4851 tmp_ss_info->type = GFC_SS_SECTION;
4852
4853 gcc_assert (tmp_ss->dimen != 0);
4854
4855 gfc_trans_create_temp_array (&loop->pre, &loop->post, tmp_ss, tmp,
4856 NULL_TREE, false, true, false, where);
4857 }
4858
4859 /* For array parameters we don't have loop variables, so don't calculate the
4860 translations. */
4861 if (!loop->array_parameter)
4862 gfc_set_delta (loop);
4863 }
4864
4865
4866 /* Calculates how to transform from loop variables to array indices for each
4867 array: once loop bounds are chosen, sets the difference (DELTA field) between
4868 loop bounds and array reference bounds, for each array info. */
4869
4870 void
4871 gfc_set_delta (gfc_loopinfo *loop)
4872 {
4873 gfc_ss *ss, **loopspec;
4874 gfc_array_info *info;
4875 tree tmp;
4876 int n, dim;
4877
4878 gfc_loopinfo * const outer_loop = outermost_loop (loop);
4879
4880 loopspec = loop->specloop;
4881
4882 /* Calculate the translation from loop variables to array indices. */
4883 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
4884 {
4885 gfc_ss_type ss_type;
4886
4887 ss_type = ss->info->type;
4888 if (ss_type != GFC_SS_SECTION
4889 && ss_type != GFC_SS_COMPONENT
4890 && ss_type != GFC_SS_CONSTRUCTOR)
4891 continue;
4892
4893 info = &ss->info->data.array;
4894
4895 for (n = 0; n < ss->dimen; n++)
4896 {
4897 /* If we are specifying the range the delta is already set. */
4898 if (loopspec[n] != ss)
4899 {
4900 dim = ss->dim[n];
4901
4902 /* Calculate the offset relative to the loop variable.
4903 First multiply by the stride. */
4904 tmp = loop->from[n];
4905 if (!integer_onep (info->stride[dim]))
4906 tmp = fold_build2_loc (input_location, MULT_EXPR,
4907 gfc_array_index_type,
4908 tmp, info->stride[dim]);
4909
4910 /* Then subtract this from our starting value. */
4911 tmp = fold_build2_loc (input_location, MINUS_EXPR,
4912 gfc_array_index_type,
4913 info->start[dim], tmp);
4914
4915 info->delta[dim] = gfc_evaluate_now (tmp, &outer_loop->pre);
4916 }
4917 }
4918 }
4919
4920 for (loop = loop->nested; loop; loop = loop->next)
4921 gfc_set_delta (loop);
4922 }
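/* Example (sketch): with the loop normalized to start at 0, a second
   term that is the unit-stride section B(5:9) gets
   delta = start - from * stride = 5 - 0 * 1 = 5, so B is later addressed
   at loopvar + 5 while the loop variable itself runs 0..4. */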
4923
4924
4925 /* Calculate the size of a given array dimension from the bounds. This
4926 is simply (ubound - lbound + 1) if this expression is positive
4927 or 0 if it is negative (pick either one if it is zero). Optionally
4928 (if or_expr is present) OR the (ubound - lbound + 1 <= 0) condition to it. */
4929
4930 tree
4931 gfc_conv_array_extent_dim (tree lbound, tree ubound, tree* or_expr)
4932 {
4933 tree res;
4934 tree cond;
4935
4936 /* Calculate (ubound - lbound + 1). */
4937 res = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
4938 ubound, lbound);
4939 res = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type, res,
4940 gfc_index_one_node);
4941
4942 /* Check whether the size for this dimension is negative. */
4943 cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, res,
4944 gfc_index_zero_node);
4945 res = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type, cond,
4946 gfc_index_zero_node, res);
4947
4948 /* Build OR expression. */
4949 if (or_expr)
4950 *or_expr = fold_build2_loc (input_location, TRUTH_OR_EXPR,
4951 boolean_type_node, *or_expr, cond);
4952
4953 return res;
4954 }
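/* Examples: lbound = 2, ubound = 6 gives an extent of 5; lbound = 5,
   ubound = 3 gives a negative raw value and therefore an extent of 0,
   and when OR_EXPR is supplied the "this dimension is empty" condition
   is ORed into it. */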
4955
4956
4957 /* For an array descriptor, get the total number of elements. This is just
4958 the product of the extents along from_dim to to_dim. */
4959
4960 static tree
4961 gfc_conv_descriptor_size_1 (tree desc, int from_dim, int to_dim)
4962 {
4963 tree res;
4964 int dim;
4965
4966 res = gfc_index_one_node;
4967
4968 for (dim = from_dim; dim < to_dim; ++dim)
4969 {
4970 tree lbound;
4971 tree ubound;
4972 tree extent;
4973
4974 lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]);
4975 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]);
4976
4977 extent = gfc_conv_array_extent_dim (lbound, ubound, NULL);
4978 res = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
4979 res, extent);
4980 }
4981
4982 return res;
4983 }
4984
4985
4986 /* Full size of an array. */
4987
4988 tree
4989 gfc_conv_descriptor_size (tree desc, int rank)
4990 {
4991 return gfc_conv_descriptor_size_1 (desc, 0, rank);
4992 }
4993
4994
4995 /* Size of a coarray for all dimensions but the last. */
4996
4997 tree
4998 gfc_conv_descriptor_cosize (tree desc, int rank, int corank)
4999 {
5000 return gfc_conv_descriptor_size_1 (desc, rank, rank + corank - 1);
5001 }
5002
5003
5004 /* Fills in an array descriptor and returns the size of the array.
5005 The size will be a simple value, i.e. a variable or a constant. Also
5006 calculates the offset of the base. The pointer argument OVERFLOW,
5007 which should be of integer type, will be increased in value if
5008 overflow occurs during the size calculation.
5009 {
5010 stride = 1;
5011 offset = 0;
5012 for (n = 0; n < rank; n++)
5013 {
5014 a.lbound[n] = specified_lower_bound;
5015 offset = offset + a.lbound[n] * stride;
5016 size = 1 - lbound;
5017 a.ubound[n] = specified_upper_bound;
5018 a.stride[n] = stride;
5019 size = size >= 0 ? ubound + size : 0; //size = ubound + 1 - lbound
5020 overflow += size == 0 ? 0: (MAX/size < stride ? 1: 0);
5021 stride = stride * size;
5022 }
5023 for (n = rank; n < rank+corank; n++)
5024 (Set lcobound/ucobound as above.)
5025 element_size = sizeof (array element);
5026 if (!rank)
5027 return element_size
5028 stride = (size_t) stride;
5029 overflow += element_size == 0 ? 0: (MAX/element_size < stride ? 1: 0);
5030 stride = stride * element_size;
5031 return (stride);
5032 } */
5033 /*GCC ARRAYS*/
5034
5035 static tree
5036 gfc_array_init_size (tree descriptor, int rank, int corank, tree * poffset,
5037 gfc_expr ** lower, gfc_expr ** upper, stmtblock_t * pblock,
5038 stmtblock_t * descriptor_block, tree * overflow,
5039 tree expr3_elem_size, tree *nelems, gfc_expr *expr3,
5040 tree expr3_desc, bool e3_is_array_constr)
5041 {
5042 tree type;
5043 tree tmp;
5044 tree size;
5045 tree offset;
5046 tree stride;
5047 tree element_size;
5048 tree or_expr;
5049 tree thencase;
5050 tree elsecase;
5051 tree cond;
5052 tree var;
5053 stmtblock_t thenblock;
5054 stmtblock_t elseblock;
5055 gfc_expr *ubound;
5056 gfc_se se;
5057 int n;
5058
5059 type = TREE_TYPE (descriptor);
5060
5061 stride = gfc_index_one_node;
5062 offset = gfc_index_zero_node;
5063
5064 /* Set the dtype. */
5065 tmp = gfc_conv_descriptor_dtype (descriptor);
5066 gfc_add_modify (descriptor_block, tmp, gfc_get_dtype (type));
5067
5068 or_expr = boolean_false_node;
5069
5070 for (n = 0; n < rank; n++)
5071 {
5072 tree conv_lbound;
5073 tree conv_ubound;
5074
5075 /* We have 3 possibilities for determining the size of the array:
5076 lower == NULL => lbound = 1, ubound = upper[n]
5077 upper[n] = NULL => lbound = 1, ubound = lower[n]
5078 upper[n] != NULL => lbound = lower[n], ubound = upper[n] */
5079 ubound = upper[n];
5080
5081 /* Set lower bound. */
5082 gfc_init_se (&se, NULL);
5083 if (expr3_desc != NULL_TREE)
5084 {
5085 if (e3_is_array_constr)
5086 /* The lbound of a constant array [] starts at zero, but when
5087 allocating it, the standard expects the array to start at
5088 one. */
5089 se.expr = gfc_index_one_node;
5090 else
5091 se.expr = gfc_conv_descriptor_lbound_get (expr3_desc,
5092 gfc_rank_cst[n]);
5093 }
5094 else if (lower == NULL)
5095 se.expr = gfc_index_one_node;
5096 else
5097 {
5098 gcc_assert (lower[n]);
5099 if (ubound)
5100 {
5101 gfc_conv_expr_type (&se, lower[n], gfc_array_index_type);
5102 gfc_add_block_to_block (pblock, &se.pre);
5103 }
5104 else
5105 {
5106 se.expr = gfc_index_one_node;
5107 ubound = lower[n];
5108 }
5109 }
5110 gfc_conv_descriptor_lbound_set (descriptor_block, descriptor,
5111 gfc_rank_cst[n], se.expr);
5112 conv_lbound = se.expr;
5113
5114 /* Work out the offset for this component. */
5115 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
5116 se.expr, stride);
5117 offset = fold_build2_loc (input_location, MINUS_EXPR,
5118 gfc_array_index_type, offset, tmp);
5119
5120 /* Set upper bound. */
5121 gfc_init_se (&se, NULL);
5122 if (expr3_desc != NULL_TREE)
5123 {
5124 if (e3_is_array_constr)
5125 {
5126 /* The lbound of a constant array [] starts at zero, but when
5127 allocating it, the standard expects the array to start at
5128 one. Therefore fix the upper bound to be
5129 (desc.ubound - desc.lbound)+ 1. */
5130 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5131 gfc_array_index_type,
5132 gfc_conv_descriptor_ubound_get (
5133 expr3_desc, gfc_rank_cst[n]),
5134 gfc_conv_descriptor_lbound_get (
5135 expr3_desc, gfc_rank_cst[n]));
5136 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5137 gfc_array_index_type, tmp,
5138 gfc_index_one_node);
5139 se.expr = gfc_evaluate_now (tmp, pblock);
5140 }
5141 else
5142 se.expr = gfc_conv_descriptor_ubound_get (expr3_desc,
5143 gfc_rank_cst[n]);
5144 }
5145 else
5146 {
5147 gcc_assert (ubound);
5148 gfc_conv_expr_type (&se, ubound, gfc_array_index_type);
5149 gfc_add_block_to_block (pblock, &se.pre);
5150 if (ubound->expr_type == EXPR_FUNCTION)
5151 se.expr = gfc_evaluate_now (se.expr, pblock);
5152 }
5153 gfc_conv_descriptor_ubound_set (descriptor_block, descriptor,
5154 gfc_rank_cst[n], se.expr);
5155 conv_ubound = se.expr;
5156
5157 /* Store the stride. */
5158 gfc_conv_descriptor_stride_set (descriptor_block, descriptor,
5159 gfc_rank_cst[n], stride);
5160
5161 /* Calculate size and check whether extent is negative. */
5162 size = gfc_conv_array_extent_dim (conv_lbound, conv_ubound, &or_expr);
5163 size = gfc_evaluate_now (size, pblock);
5164
5165 /* Check whether multiplying the stride by the number of
5166 elements in this dimension would overflow. We must also check
5167 whether the current dimension has zero size in order to avoid
5168 division by zero.
5169 */
5170 tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
5171 gfc_array_index_type,
5172 fold_convert (gfc_array_index_type,
5173 TYPE_MAX_VALUE (gfc_array_index_type)),
5174 size);
5175 cond = gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
5176 boolean_type_node, tmp, stride),
5177 PRED_FORTRAN_OVERFLOW);
5178 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
5179 integer_one_node, integer_zero_node);
5180 cond = gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
5181 boolean_type_node, size,
5182 gfc_index_zero_node),
5183 PRED_FORTRAN_SIZE_ZERO);
5184 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
5185 integer_zero_node, tmp);
5186 tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
5187 *overflow, tmp);
5188 *overflow = gfc_evaluate_now (tmp, pblock);
5189
5190 /* Multiply the stride by the number of elements in this dimension. */
5191 stride = fold_build2_loc (input_location, MULT_EXPR,
5192 gfc_array_index_type, stride, size);
5193 stride = gfc_evaluate_now (stride, pblock);
5194 }
5195
5196 for (n = rank; n < rank + corank; n++)
5197 {
5198 ubound = upper[n];
5199
5200 /* Set lower bound. */
5201 gfc_init_se (&se, NULL);
5202 if (lower == NULL || lower[n] == NULL)
5203 {
5204 gcc_assert (n == rank + corank - 1);
5205 se.expr = gfc_index_one_node;
5206 }
5207 else
5208 {
5209 if (ubound || n == rank + corank - 1)
5210 {
5211 gfc_conv_expr_type (&se, lower[n], gfc_array_index_type);
5212 gfc_add_block_to_block (pblock, &se.pre);
5213 }
5214 else
5215 {
5216 se.expr = gfc_index_one_node;
5217 ubound = lower[n];
5218 }
5219 }
5220 gfc_conv_descriptor_lbound_set (descriptor_block, descriptor,
5221 gfc_rank_cst[n], se.expr);
5222
5223 if (n < rank + corank - 1)
5224 {
5225 gfc_init_se (&se, NULL);
5226 gcc_assert (ubound);
5227 gfc_conv_expr_type (&se, ubound, gfc_array_index_type);
5228 gfc_add_block_to_block (pblock, &se.pre);
5229 gfc_conv_descriptor_ubound_set (descriptor_block, descriptor,
5230 gfc_rank_cst[n], se.expr);
5231 }
5232 }
5233
5234 /* The stride is the number of elements in the array, so multiply by the
5235 size of an element to get the total size. Obviously, if there is a
5236 SOURCE expression (expr3) we must use its element size. */
5237 if (expr3_elem_size != NULL_TREE)
5238 tmp = expr3_elem_size;
5239 else if (expr3 != NULL)
5240 {
5241 if (expr3->ts.type == BT_CLASS)
5242 {
5243 gfc_se se_sz;
5244 gfc_expr *sz = gfc_copy_expr (expr3);
5245 gfc_add_vptr_component (sz);
5246 gfc_add_size_component (sz);
5247 gfc_init_se (&se_sz, NULL);
5248 gfc_conv_expr (&se_sz, sz);
5249 gfc_free_expr (sz);
5250 tmp = se_sz.expr;
5251 }
5252 else
5253 {
5254 tmp = gfc_typenode_for_spec (&expr3->ts);
5255 tmp = TYPE_SIZE_UNIT (tmp);
5256 }
5257 }
5258 else
5259 tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
5260
5261 /* Convert to size_t. */
5262 element_size = fold_convert (size_type_node, tmp);
5263
5264 if (rank == 0)
5265 return element_size;
5266
5267 *nelems = gfc_evaluate_now (stride, pblock);
5268 stride = fold_convert (size_type_node, stride);
5269
5270 /* First check for overflow. Since an array of type character can
5271 have zero element_size, we must check for that before
5272 dividing. */
5273 tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
5274 size_type_node,
5275 TYPE_MAX_VALUE (size_type_node), element_size);
5276 cond = gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
5277 boolean_type_node, tmp, stride),
5278 PRED_FORTRAN_OVERFLOW);
5279 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
5280 integer_one_node, integer_zero_node);
5281 cond = gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
5282 boolean_type_node, element_size,
5283 build_int_cst (size_type_node, 0)),
5284 PRED_FORTRAN_SIZE_ZERO);
5285 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
5286 integer_zero_node, tmp);
5287 tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
5288 *overflow, tmp);
5289 *overflow = gfc_evaluate_now (tmp, pblock);
5290
5291 size = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
5292 stride, element_size);
5293
5294 if (poffset != NULL)
5295 {
5296 offset = gfc_evaluate_now (offset, pblock);
5297 *poffset = offset;
5298 }
5299
5300 if (integer_zerop (or_expr))
5301 return size;
5302 if (integer_onep (or_expr))
5303 return build_int_cst (size_type_node, 0);
5304
5305 var = gfc_create_var (TREE_TYPE (size), "size");
5306 gfc_start_block (&thenblock);
5307 gfc_add_modify (&thenblock, var, build_int_cst (size_type_node, 0));
5308 thencase = gfc_finish_block (&thenblock);
5309
5310 gfc_start_block (&elseblock);
5311 gfc_add_modify (&elseblock, var, size);
5312 elsecase = gfc_finish_block (&elseblock);
5313
5314 tmp = gfc_evaluate_now (or_expr, pblock);
5315 tmp = build3_v (COND_EXPR, tmp, thencase, elsecase);
5316 gfc_add_expr_to_block (pblock, tmp);
5317
5318 return var;
5319 }
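/* Worked example (sketch): for  ALLOCATE (a(2:4,3))  with a 4-byte
   element type the loop above fills in
     dim 1: lbound = 2, ubound = 4, stride = 1   (extent 3)
     dim 2: lbound = 1, ubound = 3, stride = 3   (extent 3)
   offset = -(2 * 1) - (1 * 3) = -5, *nelems = 9, and the returned size
   is 9 * 4 = 36 bytes, guarded by the overflow checks shown above. */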
5320
5321
5322 /* Retrieve the last ref from the chain. This routine is specific to
5323 gfc_array_allocate ()'s needs. */
5324
5325 bool
5326 retrieve_last_ref (gfc_ref **ref_in, gfc_ref **prev_ref_in)
5327 {
5328 gfc_ref *ref, *prev_ref;
5329
5330 ref = *ref_in;
5331 /* Prevent warnings for uninitialized variables. */
5332 prev_ref = *prev_ref_in;
5333 while (ref && ref->next != NULL)
5334 {
5335 gcc_assert (ref->type != REF_ARRAY || ref->u.ar.type == AR_ELEMENT
5336 || (ref->u.ar.dimen == 0 && ref->u.ar.codimen > 0));
5337 prev_ref = ref;
5338 ref = ref->next;
5339 }
5340
5341 if (ref == NULL || ref->type != REF_ARRAY)
5342 return false;
5343
5344 *ref_in = ref;
5345 *prev_ref_in = prev_ref;
5346 return true;
5347 }
5348
5349 /* Initializes the descriptor and generates a call to _gfor_allocate. Does
5350 the work for an ALLOCATE statement. */
5351 /*GCC ARRAYS*/
5352
5353 bool
5354 gfc_array_allocate (gfc_se * se, gfc_expr * expr, tree status, tree errmsg,
5355 tree errlen, tree label_finish, tree expr3_elem_size,
5356 tree *nelems, gfc_expr *expr3, tree e3_arr_desc,
5357 bool e3_is_array_constr)
5358 {
5359 tree tmp;
5360 tree pointer;
5361 tree offset = NULL_TREE;
5362 tree token = NULL_TREE;
5363 tree size;
5364 tree msg;
5365 tree error = NULL_TREE;
5366 tree overflow; /* Boolean storing whether size calculation overflows. */
5367 tree var_overflow = NULL_TREE;
5368 tree cond;
5369 tree set_descriptor;
5370 stmtblock_t set_descriptor_block;
5371 stmtblock_t elseblock;
5372 gfc_expr **lower;
5373 gfc_expr **upper;
5374 gfc_ref *ref, *prev_ref = NULL;
5375 bool allocatable, coarray, dimension, alloc_w_e3_arr_spec = false;
5376
5377 ref = expr->ref;
5378
5379 /* Find the last reference in the chain. */
5380 if (!retrieve_last_ref (&ref, &prev_ref))
5381 return false;
5382
5383 if (ref->u.ar.type == AR_FULL && expr3 != NULL)
5384 {
5385 /* F08:C633: Array shape from expr3. */
5386 ref = expr3->ref;
5387
5388 /* Find the last reference in the chain. */
5389 if (!retrieve_last_ref (&ref, &prev_ref))
5390 return false;
5391 alloc_w_e3_arr_spec = true;
5392 }
5393
5394 if (!prev_ref)
5395 {
5396 allocatable = expr->symtree->n.sym->attr.allocatable;
5397 coarray = expr->symtree->n.sym->attr.codimension;
5398 dimension = expr->symtree->n.sym->attr.dimension;
5399 }
5400 else
5401 {
5402 allocatable = prev_ref->u.c.component->attr.allocatable;
5403 coarray = prev_ref->u.c.component->attr.codimension;
5404 dimension = prev_ref->u.c.component->attr.dimension;
5405 }
5406
5407 if (!dimension)
5408 gcc_assert (coarray);
5409
5410 /* Figure out the size of the array. */
5411 switch (ref->u.ar.type)
5412 {
5413 case AR_ELEMENT:
5414 if (!coarray)
5415 {
5416 lower = NULL;
5417 upper = ref->u.ar.start;
5418 break;
5419 }
5420 /* Fall through. */
5421
5422 case AR_SECTION:
5423 lower = ref->u.ar.start;
5424 upper = ref->u.ar.end;
5425 break;
5426
5427 case AR_FULL:
5428 gcc_assert (ref->u.ar.as->type == AS_EXPLICIT
5429 || alloc_w_e3_arr_spec);
5430
5431 lower = ref->u.ar.as->lower;
5432 upper = ref->u.ar.as->upper;
5433 break;
5434
5435 default:
5436 gcc_unreachable ();
5437 break;
5438 }
5439
5440 overflow = integer_zero_node;
5441
5442 gfc_init_block (&set_descriptor_block);
5443 size = gfc_array_init_size (se->expr, alloc_w_e3_arr_spec ? expr->rank
5444 : ref->u.ar.as->rank,
5445 ref->u.ar.as->corank, &offset, lower, upper,
5446 &se->pre, &set_descriptor_block, &overflow,
5447 expr3_elem_size, nelems, expr3, e3_arr_desc,
5448 e3_is_array_constr);
5449
5450 if (dimension)
5451 {
5452 var_overflow = gfc_create_var (integer_type_node, "overflow");
5453 gfc_add_modify (&se->pre, var_overflow, overflow);
5454
5455 if (status == NULL_TREE)
5456 {
5457 /* Generate the block of code handling overflow. */
5458 msg = gfc_build_addr_expr (pchar_type_node,
5459 gfc_build_localized_cstring_const
5460 ("Integer overflow when calculating the amount of "
5461 "memory to allocate"));
5462 error = build_call_expr_loc (input_location,
5463 gfor_fndecl_runtime_error, 1, msg);
5464 }
5465 else
5466 {
5467 tree status_type = TREE_TYPE (status);
5468 stmtblock_t set_status_block;
5469
5470 gfc_start_block (&set_status_block);
5471 gfc_add_modify (&set_status_block, status,
5472 build_int_cst (status_type, LIBERROR_ALLOCATION));
5473 error = gfc_finish_block (&set_status_block);
5474 }
5475 }
5476
5477 gfc_start_block (&elseblock);
5478
5479 /* Allocate memory to store the data. */
5480 if (POINTER_TYPE_P (TREE_TYPE (se->expr)))
5481 se->expr = build_fold_indirect_ref_loc (input_location, se->expr);
5482
5483 pointer = gfc_conv_descriptor_data_get (se->expr);
5484 STRIP_NOPS (pointer);
5485
5486 if (coarray && flag_coarray == GFC_FCOARRAY_LIB)
5487 token = gfc_build_addr_expr (NULL_TREE,
5488 gfc_conv_descriptor_token (se->expr));
5489
5490 /* The allocatable variant takes the old pointer as first argument. */
5491 if (allocatable)
5492 gfc_allocate_allocatable (&elseblock, pointer, size, token,
5493 status, errmsg, errlen, label_finish, expr);
5494 else
5495 gfc_allocate_using_malloc (&elseblock, pointer, size, status);
5496
5497 if (dimension)
5498 {
5499 cond = gfc_unlikely (fold_build2_loc (input_location, NE_EXPR,
5500 boolean_type_node, var_overflow, integer_zero_node),
5501 PRED_FORTRAN_OVERFLOW);
5502 tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond,
5503 error, gfc_finish_block (&elseblock));
5504 }
5505 else
5506 tmp = gfc_finish_block (&elseblock);
5507
5508 gfc_add_expr_to_block (&se->pre, tmp);
5509
5510 /* Update the array descriptors. */
5511 if (dimension)
5512 gfc_conv_descriptor_offset_set (&set_descriptor_block, se->expr, offset);
5513
5514 set_descriptor = gfc_finish_block (&set_descriptor_block);
5515 if (status != NULL_TREE)
5516 {
5517 cond = fold_build2_loc (input_location, EQ_EXPR,
5518 boolean_type_node, status,
5519 build_int_cst (TREE_TYPE (status), 0));
5520 gfc_add_expr_to_block (&se->pre,
5521 fold_build3_loc (input_location, COND_EXPR, void_type_node,
5522 gfc_likely (cond, PRED_FORTRAN_FAIL_ALLOC),
5523 set_descriptor,
5524 build_empty_stmt (input_location)));
5525 }
5526 else
5527 gfc_add_expr_to_block (&se->pre, set_descriptor);
5528
5529 if ((expr->ts.type == BT_DERIVED)
5530 && expr->ts.u.derived->attr.alloc_comp)
5531 {
5532 tmp = gfc_nullify_alloc_comp (expr->ts.u.derived, se->expr,
5533 ref->u.ar.as->rank);
5534 gfc_add_expr_to_block (&se->pre, tmp);
5535 }
5536
5537 return true;
5538 }
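/* Overall shape of the code emitted by gfc_array_allocate (sketch):

     overflow = <sum of per-dimension and element-size overflow flags>;
     if (overflow != 0)
       <runtime error, or set STAT to LIBERROR_ALLOCATION>;
     else
       <allocate the data: malloc, the allocatable path, or coarray
        registration via the descriptor token>;
     if (<no STAT given> || stat == 0)
       <fill in the descriptor: dtype, bounds, strides, offset>;
*/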
5539
5540
5541 /* Deallocate an array variable. Also used when an allocated variable goes
5542 out of scope. */
5543 /*GCC ARRAYS*/
5544
5545 tree
5546 gfc_array_deallocate (tree descriptor, tree pstat, tree errmsg, tree errlen,
5547 tree label_finish, gfc_expr* expr)
5548 {
5549 tree var;
5550 tree tmp;
5551 stmtblock_t block;
5552 bool coarray = gfc_is_coarray (expr);
5553
5554 gfc_start_block (&block);
5555
5556 /* Get a pointer to the data. */
5557 var = gfc_conv_descriptor_data_get (descriptor);
5558 STRIP_NOPS (var);
5559
5560 /* Parameter is the address of the data component. */
5561 tmp = gfc_deallocate_with_status (coarray ? descriptor : var, pstat, errmsg,
5562 errlen, label_finish, false, expr, coarray);
5563 gfc_add_expr_to_block (&block, tmp);
5564
5565 /* Zero the data pointer; an error can occur only for coarrays, in which
5566 case the allocation status may not be changed. */
5567 tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
5568 var, build_int_cst (TREE_TYPE (var), 0));
5569 if (pstat != NULL_TREE && coarray && flag_coarray == GFC_FCOARRAY_LIB)
5570 {
5571 tree cond;
5572 tree stat = build_fold_indirect_ref_loc (input_location, pstat);
5573
5574 cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
5575 stat, build_int_cst (TREE_TYPE (stat), 0));
5576 tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
5577 cond, tmp, build_empty_stmt (input_location));
5578 }
5579
5580 gfc_add_expr_to_block (&block, tmp);
5581
5582 return gfc_finish_block (&block);
5583 }
5584
5585
5586 /* Create an array constructor from an initialization expression.
5587 We assume the frontend already did any expansions and conversions. */
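/* For illustration (example code is made up): an initializer such as

     integer :: a(4) = (/ 1, 2, 3, 4 /)

   reaches this function as an EXPR_ARRAY whose elements fill the tree
   CONSTRUCTOR one by one, whereas a scalar initializer such as

     integer :: b(100) = 0

   takes the EXPR_CONSTANT path and is replicated once per element.  */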
5588
5589 tree
5590 gfc_conv_array_initializer (tree type, gfc_expr * expr)
5591 {
5592 gfc_constructor *c;
5593 tree tmp;
5594 offset_int wtmp;
5595 gfc_se se;
5596 tree index, range;
5597 vec<constructor_elt, va_gc> *v = NULL;
5598
5599 if (expr->expr_type == EXPR_VARIABLE
5600 && expr->symtree->n.sym->attr.flavor == FL_PARAMETER
5601 && expr->symtree->n.sym->value)
5602 expr = expr->symtree->n.sym->value;
5603
5604 switch (expr->expr_type)
5605 {
5606 case EXPR_CONSTANT:
5607 case EXPR_STRUCTURE:
5608 /* A single scalar or derived type value. Create an array with all
5609 elements equal to that value. */
5610 gfc_init_se (&se, NULL);
5611
5612 if (expr->expr_type == EXPR_CONSTANT)
5613 gfc_conv_constant (&se, expr);
5614 else
5615 gfc_conv_structure (&se, expr, 1);
5616
5617 wtmp = wi::to_offset (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) + 1;
5618 /* This will probably eat buckets of memory for large arrays. */
5619 while (wtmp != 0)
5620 {
5621 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, se.expr);
5622 wtmp -= 1;
5623 }
5624 break;
5625
5626 case EXPR_ARRAY:
5627 /* Create a vector of all the elements. */
5628 for (c = gfc_constructor_first (expr->value.constructor);
5629 c; c = gfc_constructor_next (c))
5630 {
5631 if (c->iterator)
5632 {
5633 /* Problems occur when we get something like
5634 integer :: a(lots) = (/(i, i=1, lots)/) */
5635 gfc_fatal_error ("The number of elements in the array "
5636 "constructor at %L requires an increase of "
5637 "the allowed %d upper limit. See "
5638 "%<-fmax-array-constructor%> option",
5639 &expr->where, flag_max_array_constructor);
5640 return NULL_TREE;
5641 }
5642 if (mpz_cmp_si (c->offset, 0) != 0)
5643 index = gfc_conv_mpz_to_tree (c->offset, gfc_index_integer_kind);
5644 else
5645 index = NULL_TREE;
5646
5647 if (mpz_cmp_si (c->repeat, 1) > 0)
5648 {
5649 tree tmp1, tmp2;
5650 mpz_t maxval;
5651
5652 mpz_init (maxval);
5653 mpz_add (maxval, c->offset, c->repeat);
5654 mpz_sub_ui (maxval, maxval, 1);
5655 tmp2 = gfc_conv_mpz_to_tree (maxval, gfc_index_integer_kind);
5656 if (mpz_cmp_si (c->offset, 0) != 0)
5657 {
5658 mpz_add_ui (maxval, c->offset, 1);
5659 tmp1 = gfc_conv_mpz_to_tree (maxval, gfc_index_integer_kind);
5660 }
5661 else
5662 tmp1 = gfc_conv_mpz_to_tree (c->offset, gfc_index_integer_kind);
5663
5664 range = fold_build2 (RANGE_EXPR, gfc_array_index_type, tmp1, tmp2);
5665 mpz_clear (maxval);
5666 }
5667 else
5668 range = NULL;
5669
5670 gfc_init_se (&se, NULL);
5671 switch (c->expr->expr_type)
5672 {
5673 case EXPR_CONSTANT:
5674 gfc_conv_constant (&se, c->expr);
5675 break;
5676
5677 case EXPR_STRUCTURE:
5678 gfc_conv_structure (&se, c->expr, 1);
5679 break;
5680
5681 default:
5682 /* Catch those occasional beasts that do not simplify
5683 for one reason or another, assuming that if they are
5684 standard-defying, the frontend will catch them. */
5685 gfc_conv_expr (&se, c->expr);
5686 break;
5687 }
5688
5689 if (range == NULL_TREE)
5690 CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
5691 else
5692 {
5693 if (index != NULL_TREE)
5694 CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
5695 CONSTRUCTOR_APPEND_ELT (v, range, se.expr);
5696 }
5697 }
5698 break;
5699
5700 case EXPR_NULL:
5701 return gfc_build_null_descriptor (type);
5702
5703 default:
5704 gcc_unreachable ();
5705 }
5706
5707 /* Create a constructor from the list of elements. */
5708 tmp = build_constructor (type, v);
5709 TREE_CONSTANT (tmp) = 1;
5710 return tmp;
5711 }
5712
5713
5714 /* Generate code to evaluate non-constant coarray cobounds. */
5715
5716 void
5717 gfc_trans_array_cobounds (tree type, stmtblock_t * pblock,
5718 const gfc_symbol *sym)
5719 {
5720 int dim;
5721 tree ubound;
5722 tree lbound;
5723 gfc_se se;
5724 gfc_array_spec *as;
5725
5726 as = IS_CLASS_ARRAY (sym) ? CLASS_DATA (sym)->as : sym->as;
5727
5728 for (dim = as->rank; dim < as->rank + as->corank; dim++)
5729 {
5730 /* Evaluate non-constant array bound expressions. */
5731 lbound = GFC_TYPE_ARRAY_LBOUND (type, dim);
5732 if (as->lower[dim] && !INTEGER_CST_P (lbound))
5733 {
5734 gfc_init_se (&se, NULL);
5735 gfc_conv_expr_type (&se, as->lower[dim], gfc_array_index_type);
5736 gfc_add_block_to_block (pblock, &se.pre);
5737 gfc_add_modify (pblock, lbound, se.expr);
5738 }
5739 ubound = GFC_TYPE_ARRAY_UBOUND (type, dim);
5740 if (as->upper[dim] && !INTEGER_CST_P (ubound))
5741 {
5742 gfc_init_se (&se, NULL);
5743 gfc_conv_expr_type (&se, as->upper[dim], gfc_array_index_type);
5744 gfc_add_block_to_block (pblock, &se.pre);
5745 gfc_add_modify (pblock, ubound, se.expr);
5746 }
5747 }
5748 }
5749
5750
5751 /* Generate code to evaluate non-constant array bounds. Sets *poffset and
5752 returns the size (in elements) of the array. */
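/* A rough sketch of the layout maintained below (symbolic names l1, u1, l2,
   u2 are illustrative): for a local array declared as

     real :: a(l1:u1, l2:u2)

   the strides are s1 = 1 and s2 = u1 - l1 + 1, element a(i,j) lives at
   data[offset + i*s1 + j*s2], and the offset computed here is
   offset = -(l1*s1 + l2*s2), so that a(l1,l2) maps to the first element.  */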
5753
5754 static tree
5755 gfc_trans_array_bounds (tree type, gfc_symbol * sym, tree * poffset,
5756 stmtblock_t * pblock)
5757 {
5758 gfc_array_spec *as;
5759 tree size;
5760 tree stride;
5761 tree offset;
5762 tree ubound;
5763 tree lbound;
5764 tree tmp;
5765 gfc_se se;
5766
5767 int dim;
5768
5769 as = IS_CLASS_ARRAY (sym) ? CLASS_DATA (sym)->as : sym->as;
5770
5771 size = gfc_index_one_node;
5772 offset = gfc_index_zero_node;
5773 for (dim = 0; dim < as->rank; dim++)
5774 {
5775 /* Evaluate non-constant array bound expressions. */
5776 lbound = GFC_TYPE_ARRAY_LBOUND (type, dim);
5777 if (as->lower[dim] && !INTEGER_CST_P (lbound))
5778 {
5779 gfc_init_se (&se, NULL);
5780 gfc_conv_expr_type (&se, as->lower[dim], gfc_array_index_type);
5781 gfc_add_block_to_block (pblock, &se.pre);
5782 gfc_add_modify (pblock, lbound, se.expr);
5783 }
5784 ubound = GFC_TYPE_ARRAY_UBOUND (type, dim);
5785 if (as->upper[dim] && !INTEGER_CST_P (ubound))
5786 {
5787 gfc_init_se (&se, NULL);
5788 gfc_conv_expr_type (&se, as->upper[dim], gfc_array_index_type);
5789 gfc_add_block_to_block (pblock, &se.pre);
5790 gfc_add_modify (pblock, ubound, se.expr);
5791 }
5792 /* The offset of this dimension. offset = offset - lbound * stride. */
5793 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
5794 lbound, size);
5795 offset = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
5796 offset, tmp);
5797
5798 /* The size of this dimension, and the stride of the next. */
5799 if (dim + 1 < as->rank)
5800 stride = GFC_TYPE_ARRAY_STRIDE (type, dim + 1);
5801 else
5802 stride = GFC_TYPE_ARRAY_SIZE (type);
5803
5804 if (ubound != NULL_TREE && !(stride && INTEGER_CST_P (stride)))
5805 {
5806 /* Calculate stride = size * (ubound + 1 - lbound). */
5807 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5808 gfc_array_index_type,
5809 gfc_index_one_node, lbound);
5810 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5811 gfc_array_index_type, ubound, tmp);
5812 tmp = fold_build2_loc (input_location, MULT_EXPR,
5813 gfc_array_index_type, size, tmp);
5814 if (stride)
5815 gfc_add_modify (pblock, stride, tmp);
5816 else
5817 stride = gfc_evaluate_now (tmp, pblock);
5818
5819 /* Make sure that negative-size arrays are translated
5820 to zero size. */
5821 tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
5822 stride, gfc_index_zero_node);
5823 tmp = fold_build3_loc (input_location, COND_EXPR,
5824 gfc_array_index_type, tmp,
5825 stride, gfc_index_zero_node);
5826 gfc_add_modify (pblock, stride, tmp);
5827 }
5828
5829 size = stride;
5830 }
5831
5832 gfc_trans_array_cobounds (type, pblock, sym);
5833 gfc_trans_vla_type_sizes (sym, pblock);
5834
5835 *poffset = offset;
5836 return size;
5837 }
5838
5839
5840 /* Generate code to initialize/allocate an array variable. */
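/* For illustration (hypothetical example), a typical automatic array handled
   here would be WORK in

     subroutine solve (n)
       integer, intent(in) :: n
       real :: work(n, n)
     end subroutine solve

   Constant-size local arrays stay on the stack (the "onstack" path below);
   otherwise storage comes from malloc/free, or from a stack temporary when
   -fstack-arrays is in effect.  */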
5841
5842 void
5843 gfc_trans_auto_array_allocation (tree decl, gfc_symbol * sym,
5844 gfc_wrapped_block * block)
5845 {
5846 stmtblock_t init;
5847 tree type;
5848 tree tmp = NULL_TREE;
5849 tree size;
5850 tree offset;
5851 tree space;
5852 tree inittree;
5853 bool onstack;
5854
5855 gcc_assert (!(sym->attr.pointer || sym->attr.allocatable));
5856
5857 /* Do nothing for USEd variables. */
5858 if (sym->attr.use_assoc)
5859 return;
5860
5861 type = TREE_TYPE (decl);
5862 gcc_assert (GFC_ARRAY_TYPE_P (type));
5863 onstack = TREE_CODE (type) != POINTER_TYPE;
5864
5865 gfc_init_block (&init);
5866
5867 /* Evaluate character string length. */
5868 if (sym->ts.type == BT_CHARACTER
5869 && onstack && !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
5870 {
5871 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5872
5873 gfc_trans_vla_type_sizes (sym, &init);
5874
5875 /* Emit a DECL_EXPR for this variable, which will cause the
5876 gimplifier to allocate storage, and all that good stuff. */
5877 tmp = fold_build1_loc (input_location, DECL_EXPR, TREE_TYPE (decl), decl);
5878 gfc_add_expr_to_block (&init, tmp);
5879 }
5880
5881 if (onstack)
5882 {
5883 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
5884 return;
5885 }
5886
5887 type = TREE_TYPE (type);
5888
5889 gcc_assert (!sym->attr.use_assoc);
5890 gcc_assert (!TREE_STATIC (decl));
5891 gcc_assert (!sym->module);
5892
5893 if (sym->ts.type == BT_CHARACTER
5894 && !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
5895 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5896
5897 size = gfc_trans_array_bounds (type, sym, &offset, &init);
5898
5899 /* Don't actually allocate space for Cray Pointees. */
5900 if (sym->attr.cray_pointee)
5901 {
5902 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5903 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5904
5905 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
5906 return;
5907 }
5908
5909 if (flag_stack_arrays)
5910 {
5911 gcc_assert (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE);
5912 space = build_decl (sym->declared_at.lb->location,
5913 VAR_DECL, create_tmp_var_name ("A"),
5914 TREE_TYPE (TREE_TYPE (decl)));
5915 gfc_trans_vla_type_sizes (sym, &init);
5916 }
5917 else
5918 {
5919 /* The size is the number of elements in the array, so multiply by the
5920 size of an element to get the total size. */
5921 tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
5922 size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
5923 size, fold_convert (gfc_array_index_type, tmp));
5924
5925 /* Allocate memory to hold the data. */
5926 tmp = gfc_call_malloc (&init, TREE_TYPE (decl), size);
5927 gfc_add_modify (&init, decl, tmp);
5928
5929 /* Free the temporary. */
5930 tmp = gfc_call_free (decl);
5931 space = NULL_TREE;
5932 }
5933
5934 /* Set offset of the array. */
5935 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5936 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5937
5938 /* Automatic arrays should not have initializers. */
5939 gcc_assert (!sym->value);
5940
5941 inittree = gfc_finish_block (&init);
5942
5943 if (space)
5944 {
5945 tree addr;
5946 pushdecl (space);
5947
5948 /* Don't create a new scope; emit the DECL_EXPR in exactly the scope
5949 where space itself is located. */
5950 gfc_init_block (&init);
5951 tmp = fold_build1_loc (input_location, DECL_EXPR,
5952 TREE_TYPE (space), space);
5953 gfc_add_expr_to_block (&init, tmp);
5954 addr = fold_build1_loc (sym->declared_at.lb->location,
5955 ADDR_EXPR, TREE_TYPE (decl), space);
5956 gfc_add_modify (&init, decl, addr);
5957 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
5958 tmp = NULL_TREE;
5959 }
5960 gfc_add_init_cleanup (block, inittree, tmp);
5961 }
5962
5963
5964 /* Generate entry and exit code for g77 calling convention arrays. */
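/* Illustrative example (names made up): an explicit-shape dummy such as

     subroutine sub (a, n)
       integer :: n
       real :: a(n, 2*n)
     end subroutine sub

   is received as a bare data pointer in this convention, so the bounds,
   strides and offset have to be recomputed from N on entry.  */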
5965
5966 void
5967 gfc_trans_g77_array (gfc_symbol * sym, gfc_wrapped_block * block)
5968 {
5969 tree parm;
5970 tree type;
5971 locus loc;
5972 tree offset;
5973 tree tmp;
5974 tree stmt;
5975 stmtblock_t init;
5976
5977 gfc_save_backend_locus (&loc);
5978 gfc_set_backend_locus (&sym->declared_at);
5979
5980 /* Descriptor type. */
5981 parm = sym->backend_decl;
5982 type = TREE_TYPE (parm);
5983 gcc_assert (GFC_ARRAY_TYPE_P (type));
5984
5985 gfc_start_block (&init);
5986
5987 if (sym->ts.type == BT_CHARACTER
5988 && TREE_CODE (sym->ts.u.cl->backend_decl) == VAR_DECL)
5989 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5990
5991 /* Evaluate the bounds of the array. */
5992 gfc_trans_array_bounds (type, sym, &offset, &init);
5993
5994 /* Set the offset. */
5995 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5996 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5997
5998 /* Set the pointer itself if we aren't using the parameter directly. */
5999 if (TREE_CODE (parm) != PARM_DECL)
6000 {
6001 tmp = convert (TREE_TYPE (parm), GFC_DECL_SAVED_DESCRIPTOR (parm));
6002 gfc_add_modify (&init, parm, tmp);
6003 }
6004 stmt = gfc_finish_block (&init);
6005
6006 gfc_restore_backend_locus (&loc);
6007
6008 /* Add the initialization code to the start of the function. */
6009
6010 if (sym->attr.optional || sym->attr.not_always_present)
6011 {
6012 tmp = gfc_conv_expr_present (sym);
6013 stmt = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt (input_location));
6014 }
6015
6016 gfc_add_init_cleanup (block, stmt, NULL_TREE);
6017 }
6018
6019
6020 /* Modify the descriptor of an array parameter so that it has the
6021 correct lower bound. Also move the upper bound accordingly.
6022 If the array is not packed, it will be copied into a temporary.
6023 For each dimension we set the new lower and upper bounds. Then we copy the
6024 stride and calculate the offset for this dimension. We also work out
6025 what the stride of a packed array would be, and see if the two match.
6026 If the array needs repacking, we set the stride to the values we just
6027 calculated, recalculate the offset and copy the array data.
6028 Code is also added to copy the data back at the end of the function.
6029 */
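/* A rough illustration (example code is made up): for an assumed-shape dummy

     subroutine sub (a)
       real :: a(:)
     end subroutine sub

   called as  call sub (b(1:19:2)),  the incoming descriptor describes a
   strided section.  Depending on the packing attributes of the declaration,
   the code below may call the pack routine (gfor_fndecl_in_pack) to obtain
   contiguous storage and, in the cleanup, copy the data back and free the
   temporary.  */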
6030
6031 void
6032 gfc_trans_dummy_array_bias (gfc_symbol * sym, tree tmpdesc,
6033 gfc_wrapped_block * block)
6034 {
6035 tree size;
6036 tree type;
6037 tree offset;
6038 locus loc;
6039 stmtblock_t init;
6040 tree stmtInit, stmtCleanup;
6041 tree lbound;
6042 tree ubound;
6043 tree dubound;
6044 tree dlbound;
6045 tree dumdesc;
6046 tree tmp;
6047 tree stride, stride2;
6048 tree stmt_packed;
6049 tree stmt_unpacked;
6050 tree partial;
6051 gfc_se se;
6052 int n;
6053 int checkparm;
6054 int no_repack;
6055 bool optional_arg;
6056 gfc_array_spec *as;
6057 bool is_classarray = IS_CLASS_ARRAY (sym);
6058
6059 /* Do nothing for pointer and allocatable arrays. */
6060 if ((sym->ts.type != BT_CLASS && sym->attr.pointer)
6061 || (sym->ts.type == BT_CLASS && CLASS_DATA (sym)->attr.class_pointer)
6062 || sym->attr.allocatable
6063 || (is_classarray && CLASS_DATA (sym)->attr.allocatable))
6064 return;
6065
6066 if (!is_classarray && sym->attr.dummy && gfc_is_nodesc_array (sym))
6067 {
6068 gfc_trans_g77_array (sym, block);
6069 return;
6070 }
6071
6072 gfc_save_backend_locus (&loc);
6073 gfc_set_backend_locus (&sym->declared_at);
6074
6075 /* Descriptor type. */
6076 type = TREE_TYPE (tmpdesc);
6077 gcc_assert (GFC_ARRAY_TYPE_P (type));
6078 dumdesc = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
6079 if (is_classarray)
6080 /* For a class array the dummy array descriptor is in the _class
6081 component. */
6082 dumdesc = gfc_class_data_get (dumdesc);
6083 else
6084 dumdesc = build_fold_indirect_ref_loc (input_location, dumdesc);
6085 as = IS_CLASS_ARRAY (sym) ? CLASS_DATA (sym)->as : sym->as;
6086 gfc_start_block (&init);
6087
6088 if (sym->ts.type == BT_CHARACTER
6089 && TREE_CODE (sym->ts.u.cl->backend_decl) == VAR_DECL)
6090 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
6091
6092 checkparm = (as->type == AS_EXPLICIT
6093 && (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS));
6094
6095 no_repack = !(GFC_DECL_PACKED_ARRAY (tmpdesc)
6096 || GFC_DECL_PARTIAL_PACKED_ARRAY (tmpdesc));
6097
6098 if (GFC_DECL_PARTIAL_PACKED_ARRAY (tmpdesc))
6099 {
6100 /* For non-constant shape arrays we only check if the first dimension
6101 is contiguous. Repacking higher dimensions wouldn't gain us
6102 anything as we still don't know the array stride. */
6103 partial = gfc_create_var (boolean_type_node, "partial");
6104 TREE_USED (partial) = 1;
6105 tmp = gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[0]);
6106 tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, tmp,
6107 gfc_index_one_node);
6108 gfc_add_modify (&init, partial, tmp);
6109 }
6110 else
6111 partial = NULL_TREE;
6112
6113 /* The naming of stmt_unpacked and stmt_packed may be counter-intuitive
6114 here, however I think it does the right thing. */
6115 if (no_repack)
6116 {
6117 /* Set the first stride. */
6118 stride = gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[0]);
6119 stride = gfc_evaluate_now (stride, &init);
6120
6121 tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
6122 stride, gfc_index_zero_node);
6123 tmp = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type,
6124 tmp, gfc_index_one_node, stride);
6125 stride = GFC_TYPE_ARRAY_STRIDE (type, 0);
6126 gfc_add_modify (&init, stride, tmp);
6127
6128 /* Allow the user to disable array repacking. */
6129 stmt_unpacked = NULL_TREE;
6130 }
6131 else
6132 {
6133 gcc_assert (integer_onep (GFC_TYPE_ARRAY_STRIDE (type, 0)));
6134 /* A library call to repack the array if necessary. */
6135 tmp = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
6136 stmt_unpacked = build_call_expr_loc (input_location,
6137 gfor_fndecl_in_pack, 1, tmp);
6138
6139 stride = gfc_index_one_node;
6140
6141 if (warn_array_temporaries)
6142 gfc_warning (OPT_Warray_temporaries,
6143 "Creating array temporary at %L", &loc);
6144 }
6145
6146 /* This is for the case where the array data is used directly without
6147 calling the repack function. */
6148 if (no_repack || partial != NULL_TREE)
6149 stmt_packed = gfc_conv_descriptor_data_get (dumdesc);
6150 else
6151 stmt_packed = NULL_TREE;
6152
6153 /* Assign the data pointer. */
6154 if (stmt_packed != NULL_TREE && stmt_unpacked != NULL_TREE)
6155 {
6156 /* Don't repack unknown shape arrays when the first stride is 1. */
6157 tmp = fold_build3_loc (input_location, COND_EXPR, TREE_TYPE (stmt_packed),
6158 partial, stmt_packed, stmt_unpacked);
6159 }
6160 else
6161 tmp = stmt_packed != NULL_TREE ? stmt_packed : stmt_unpacked;
6162 gfc_add_modify (&init, tmpdesc, fold_convert (type, tmp));
6163
6164 offset = gfc_index_zero_node;
6165 size = gfc_index_one_node;
6166
6167 /* Evaluate the bounds of the array. */
6168 for (n = 0; n < as->rank; n++)
6169 {
6170 if (checkparm || !as->upper[n])
6171 {
6172 /* Get the bounds of the actual parameter. */
6173 dubound = gfc_conv_descriptor_ubound_get (dumdesc, gfc_rank_cst[n]);
6174 dlbound = gfc_conv_descriptor_lbound_get (dumdesc, gfc_rank_cst[n]);
6175 }
6176 else
6177 {
6178 dubound = NULL_TREE;
6179 dlbound = NULL_TREE;
6180 }
6181
6182 lbound = GFC_TYPE_ARRAY_LBOUND (type, n);
6183 if (!INTEGER_CST_P (lbound))
6184 {
6185 gfc_init_se (&se, NULL);
6186 gfc_conv_expr_type (&se, as->lower[n],
6187 gfc_array_index_type);
6188 gfc_add_block_to_block (&init, &se.pre);
6189 gfc_add_modify (&init, lbound, se.expr);
6190 }
6191
6192 ubound = GFC_TYPE_ARRAY_UBOUND (type, n);
6193 /* Set the desired upper bound. */
6194 if (as->upper[n])
6195 {
6196 /* We know what we want the upper bound to be. */
6197 if (!INTEGER_CST_P (ubound))
6198 {
6199 gfc_init_se (&se, NULL);
6200 gfc_conv_expr_type (&se, as->upper[n],
6201 gfc_array_index_type);
6202 gfc_add_block_to_block (&init, &se.pre);
6203 gfc_add_modify (&init, ubound, se.expr);
6204 }
6205
6206 /* Check the sizes match. */
6207 if (checkparm)
6208 {
6209 /* Check (ubound(a) - lbound(a) == ubound(b) - lbound(b)). */
6210 char * msg;
6211 tree temp;
6212
6213 temp = fold_build2_loc (input_location, MINUS_EXPR,
6214 gfc_array_index_type, ubound, lbound);
6215 temp = fold_build2_loc (input_location, PLUS_EXPR,
6216 gfc_array_index_type,
6217 gfc_index_one_node, temp);
6218 stride2 = fold_build2_loc (input_location, MINUS_EXPR,
6219 gfc_array_index_type, dubound,
6220 dlbound);
6221 stride2 = fold_build2_loc (input_location, PLUS_EXPR,
6222 gfc_array_index_type,
6223 gfc_index_one_node, stride2);
6224 tmp = fold_build2_loc (input_location, NE_EXPR,
6225 gfc_array_index_type, temp, stride2);
6226 msg = xasprintf ("Dimension %d of array '%s' has extent "
6227 "%%ld instead of %%ld", n+1, sym->name);
6228
6229 gfc_trans_runtime_check (true, false, tmp, &init, &loc, msg,
6230 fold_convert (long_integer_type_node, temp),
6231 fold_convert (long_integer_type_node, stride2));
6232
6233 free (msg);
6234 }
6235 }
6236 else
6237 {
6238 /* For assumed-shape arrays, move the upper bound by the same amount
6239 as the lower bound. */
6240 tmp = fold_build2_loc (input_location, MINUS_EXPR,
6241 gfc_array_index_type, dubound, dlbound);
6242 tmp = fold_build2_loc (input_location, PLUS_EXPR,
6243 gfc_array_index_type, tmp, lbound);
6244 gfc_add_modify (&init, ubound, tmp);
6245 }
6246 /* The offset of this dimension. offset = offset - lbound * stride. */
6247 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
6248 lbound, stride);
6249 offset = fold_build2_loc (input_location, MINUS_EXPR,
6250 gfc_array_index_type, offset, tmp);
6251
6252 /* The size of this dimension, and the stride of the next. */
6253 if (n + 1 < as->rank)
6254 {
6255 stride = GFC_TYPE_ARRAY_STRIDE (type, n + 1);
6256
6257 if (no_repack || partial != NULL_TREE)
6258 stmt_unpacked =
6259 gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[n+1]);
6260
6261 /* Figure out the stride if not a known constant. */
6262 if (!INTEGER_CST_P (stride))
6263 {
6264 if (no_repack)
6265 stmt_packed = NULL_TREE;
6266 else
6267 {
6268 /* Calculate stride = size * (ubound + 1 - lbound). */
6269 tmp = fold_build2_loc (input_location, MINUS_EXPR,
6270 gfc_array_index_type,
6271 gfc_index_one_node, lbound);
6272 tmp = fold_build2_loc (input_location, PLUS_EXPR,
6273 gfc_array_index_type, ubound, tmp);
6274 size = fold_build2_loc (input_location, MULT_EXPR,
6275 gfc_array_index_type, size, tmp);
6276 stmt_packed = size;
6277 }
6278
6279 /* Assign the stride. */
6280 if (stmt_packed != NULL_TREE && stmt_unpacked != NULL_TREE)
6281 tmp = fold_build3_loc (input_location, COND_EXPR,
6282 gfc_array_index_type, partial,
6283 stmt_unpacked, stmt_packed);
6284 else
6285 tmp = (stmt_packed != NULL_TREE) ? stmt_packed : stmt_unpacked;
6286 gfc_add_modify (&init, stride, tmp);
6287 }
6288 }
6289 else
6290 {
6291 stride = GFC_TYPE_ARRAY_SIZE (type);
6292
6293 if (stride && !INTEGER_CST_P (stride))
6294 {
6295 /* Calculate size = stride * (ubound + 1 - lbound). */
6296 tmp = fold_build2_loc (input_location, MINUS_EXPR,
6297 gfc_array_index_type,
6298 gfc_index_one_node, lbound);
6299 tmp = fold_build2_loc (input_location, PLUS_EXPR,
6300 gfc_array_index_type,
6301 ubound, tmp);
6302 tmp = fold_build2_loc (input_location, MULT_EXPR,
6303 gfc_array_index_type,
6304 GFC_TYPE_ARRAY_STRIDE (type, n), tmp);
6305 gfc_add_modify (&init, stride, tmp);
6306 }
6307 }
6308 }
6309
6310 gfc_trans_array_cobounds (type, &init, sym);
6311
6312 /* Set the offset. */
6313 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
6314 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
6315
6316 gfc_trans_vla_type_sizes (sym, &init);
6317
6318 stmtInit = gfc_finish_block (&init);
6319
6320 /* Only do the entry/initialization code if the arg is present. */
6321 dumdesc = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
6322 optional_arg = (sym->attr.optional
6323 || (sym->ns->proc_name->attr.entry_master
6324 && sym->attr.dummy));
6325 if (optional_arg)
6326 {
6327 tmp = gfc_conv_expr_present (sym);
6328 stmtInit = build3_v (COND_EXPR, tmp, stmtInit,
6329 build_empty_stmt (input_location));
6330 }
6331
6332 /* Cleanup code. */
6333 if (no_repack)
6334 stmtCleanup = NULL_TREE;
6335 else
6336 {
6337 stmtblock_t cleanup;
6338 gfc_start_block (&cleanup);
6339
6340 if (sym->attr.intent != INTENT_IN)
6341 {
6342 /* Copy the data back. */
6343 tmp = build_call_expr_loc (input_location,
6344 gfor_fndecl_in_unpack, 2, dumdesc, tmpdesc);
6345 gfc_add_expr_to_block (&cleanup, tmp);
6346 }
6347
6348 /* Free the temporary. */
6349 tmp = gfc_call_free (tmpdesc);
6350 gfc_add_expr_to_block (&cleanup, tmp);
6351
6352 stmtCleanup = gfc_finish_block (&cleanup);
6353
6354 /* Only do the cleanup if the array was repacked. */
6355 tmp = build_fold_indirect_ref_loc (input_location, dumdesc);
6356 tmp = gfc_conv_descriptor_data_get (tmp);
6357 tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
6358 tmp, tmpdesc);
6359 stmtCleanup = build3_v (COND_EXPR, tmp, stmtCleanup,
6360 build_empty_stmt (input_location));
6361
6362 if (optional_arg)
6363 {
6364 tmp = gfc_conv_expr_present (sym);
6365 stmtCleanup = build3_v (COND_EXPR, tmp, stmtCleanup,
6366 build_empty_stmt (input_location));
6367 }
6368 }
6369
6370 /* We don't need to free any memory allocated by internal_pack as it will
6371 be freed at the end of the function by pop_context. */
6372 gfc_add_init_cleanup (block, stmtInit, stmtCleanup);
6373
6374 gfc_restore_backend_locus (&loc);
6375 }
6376
6377
6378 /* Calculate the overall offset, including subreferences. */
6379 static void
6380 gfc_get_dataptr_offset (stmtblock_t *block, tree parm, tree desc, tree offset,
6381 bool subref, gfc_expr *expr)
6382 {
6383 tree tmp;
6384 tree field;
6385 tree stride;
6386 tree index;
6387 gfc_ref *ref;
6388 gfc_se start;
6389 int n;
6390
6391 /* If offset is NULL and this is not a subreferenced array, there is
6392 nothing to do. */
6393 if (offset == NULL_TREE)
6394 {
6395 if (subref)
6396 offset = gfc_index_zero_node;
6397 else
6398 return;
6399 }
6400
6401 tmp = build_array_ref (desc, offset, NULL, NULL);
6402
6403 /* Offset the data pointer for pointer assignments from arrays with
6404 subreferences; e.g. my_integer => my_type(:)%integer_component. */
6405 if (subref)
6406 {
6407 /* Go past the array reference. */
6408 for (ref = expr->ref; ref; ref = ref->next)
6409 if (ref->type == REF_ARRAY &&
6410 ref->u.ar.type != AR_ELEMENT)
6411 {
6412 ref = ref->next;
6413 break;
6414 }
6415
6416 /* Calculate the offset for each subsequent subreference. */
6417 for (; ref; ref = ref->next)
6418 {
6419 switch (ref->type)
6420 {
6421 case REF_COMPONENT:
6422 field = ref->u.c.component->backend_decl;
6423 gcc_assert (field && TREE_CODE (field) == FIELD_DECL);
6424 tmp = fold_build3_loc (input_location, COMPONENT_REF,
6425 TREE_TYPE (field),
6426 tmp, field, NULL_TREE);
6427 break;
6428
6429 case REF_SUBSTRING:
6430 gcc_assert (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE);
6431 gfc_init_se (&start, NULL);
6432 gfc_conv_expr_type (&start, ref->u.ss.start, gfc_charlen_type_node);
6433 gfc_add_block_to_block (block, &start.pre);
6434 tmp = gfc_build_array_ref (tmp, start.expr, NULL);
6435 break;
6436
6437 case REF_ARRAY:
6438 gcc_assert (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE
6439 && ref->u.ar.type == AR_ELEMENT);
6440
6441 /* TODO - Add bounds checking. */
6442 stride = gfc_index_one_node;
6443 index = gfc_index_zero_node;
6444 for (n = 0; n < ref->u.ar.dimen; n++)
6445 {
6446 tree itmp;
6447 tree jtmp;
6448
6449 /* Update the index. */
6450 gfc_init_se (&start, NULL);
6451 gfc_conv_expr_type (&start, ref->u.ar.start[n], gfc_array_index_type);
6452 itmp = gfc_evaluate_now (start.expr, block);
6453 gfc_init_se (&start, NULL);
6454 gfc_conv_expr_type (&start, ref->u.ar.as->lower[n], gfc_array_index_type);
6455 jtmp = gfc_evaluate_now (start.expr, block);
6456 itmp = fold_build2_loc (input_location, MINUS_EXPR,
6457 gfc_array_index_type, itmp, jtmp);
6458 itmp = fold_build2_loc (input_location, MULT_EXPR,
6459 gfc_array_index_type, itmp, stride);
6460 index = fold_build2_loc (input_location, PLUS_EXPR,
6461 gfc_array_index_type, itmp, index);
6462 index = gfc_evaluate_now (index, block);
6463
6464 /* Update the stride. */
6465 gfc_init_se (&start, NULL);
6466 gfc_conv_expr_type (&start, ref->u.ar.as->upper[n], gfc_array_index_type);
6467 itmp = fold_build2_loc (input_location, MINUS_EXPR,
6468 gfc_array_index_type, start.expr,
6469 jtmp);
6470 itmp = fold_build2_loc (input_location, PLUS_EXPR,
6471 gfc_array_index_type,
6472 gfc_index_one_node, itmp);
6473 stride = fold_build2_loc (input_location, MULT_EXPR,
6474 gfc_array_index_type, stride, itmp);
6475 stride = gfc_evaluate_now (stride, block);
6476 }
6477
6478 /* Apply the index to obtain the array element. */
6479 tmp = gfc_build_array_ref (tmp, index, NULL);
6480 break;
6481
6482 default:
6483 gcc_unreachable ();
6484 break;
6485 }
6486 }
6487 }
6488
6489 /* Set the target data pointer. */
6490 offset = gfc_build_addr_expr (gfc_array_dataptr_type (desc), tmp);
6491 gfc_conv_descriptor_data_set (block, parm, offset);
6492 }
6493
6494
6495 /* gfc_conv_expr_descriptor needs the string length of an expression
6496 so that the size of the temporary can be obtained. This is done
6497 by adding up the string lengths of all the elements in the
6498 expression. Functions with non-constant string lengths have their
6499 string lengths mapped onto the actual arguments using the
6500 interface mapping machinery in trans-expr.c. */
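/* Hypothetical example: for an expression such as  trim(s1) // s2  the
   result length is not a compile-time constant, so the EXPR_OP case below
   adds the operand lengths into a temporary "sln" variable; for a call to
   a character function, the length expression of the result is mapped onto
   the actual arguments instead.  */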
6501 static void
6502 get_array_charlen (gfc_expr *expr, gfc_se *se)
6503 {
6504 gfc_interface_mapping mapping;
6505 gfc_formal_arglist *formal;
6506 gfc_actual_arglist *arg;
6507 gfc_se tse;
6508
6509 if (expr->ts.u.cl->length
6510 && gfc_is_constant_expr (expr->ts.u.cl->length))
6511 {
6512 if (!expr->ts.u.cl->backend_decl)
6513 gfc_conv_string_length (expr->ts.u.cl, expr, &se->pre);
6514 return;
6515 }
6516
6517 switch (expr->expr_type)
6518 {
6519 case EXPR_OP:
6520 get_array_charlen (expr->value.op.op1, se);
6521
6522 /* For parentheses the expression ts.u.cl is identical. */
6523 if (expr->value.op.op == INTRINSIC_PARENTHESES)
6524 return;
6525
6526 expr->ts.u.cl->backend_decl =
6527 gfc_create_var (gfc_charlen_type_node, "sln");
6528
6529 if (expr->value.op.op2)
6530 {
6531 get_array_charlen (expr->value.op.op2, se);
6532
6533 gcc_assert (expr->value.op.op == INTRINSIC_CONCAT);
6534
6535 /* Add the string lengths and assign them to the expression
6536 string length backend declaration. */
6537 gfc_add_modify (&se->pre, expr->ts.u.cl->backend_decl,
6538 fold_build2_loc (input_location, PLUS_EXPR,
6539 gfc_charlen_type_node,
6540 expr->value.op.op1->ts.u.cl->backend_decl,
6541 expr->value.op.op2->ts.u.cl->backend_decl));
6542 }
6543 else
6544 gfc_add_modify (&se->pre, expr->ts.u.cl->backend_decl,
6545 expr->value.op.op1->ts.u.cl->backend_decl);
6546 break;
6547
6548 case EXPR_FUNCTION:
6549 if (expr->value.function.esym == NULL
6550 || expr->ts.u.cl->length->expr_type == EXPR_CONSTANT)
6551 {
6552 gfc_conv_string_length (expr->ts.u.cl, expr, &se->pre);
6553 break;
6554 }
6555
6556 /* Map expressions involving the dummy arguments onto the actual
6557 argument expressions. */
6558 gfc_init_interface_mapping (&mapping);
6559 formal = gfc_sym_get_dummy_args (expr->symtree->n.sym);
6560 arg = expr->value.function.actual;
6561
6562 /* Set se = NULL in the calls to the interface mapping, to suppress any
6563 backend stuff. */
6564 for (; arg != NULL; arg = arg->next, formal = formal ? formal->next : NULL)
6565 {
6566 if (!arg->expr)
6567 continue;
6568 if (formal->sym)
6569 gfc_add_interface_mapping (&mapping, formal->sym, NULL, arg->expr);
6570 }
6571
6572 gfc_init_se (&tse, NULL);
6573
6574 /* Build the expression for the character length and convert it. */
6575 gfc_apply_interface_mapping (&mapping, &tse, expr->ts.u.cl->length);
6576
6577 gfc_add_block_to_block (&se->pre, &tse.pre);
6578 gfc_add_block_to_block (&se->post, &tse.post);
6579 tse.expr = fold_convert (gfc_charlen_type_node, tse.expr);
6580 tse.expr = fold_build2_loc (input_location, MAX_EXPR,
6581 gfc_charlen_type_node, tse.expr,
6582 build_int_cst (gfc_charlen_type_node, 0));
6583 expr->ts.u.cl->backend_decl = tse.expr;
6584 gfc_free_interface_mapping (&mapping);
6585 break;
6586
6587 default:
6588 gfc_conv_string_length (expr->ts.u.cl, expr, &se->pre);
6589 break;
6590 }
6591 }
6592
6593
6594 /* Helper function: return true if any scalarizer dimension is transposed
6594 relative to the array dimension order. */
6595 static bool
6596 transposed_dims (gfc_ss *ss)
6597 {
6598 int n;
6599
6600 for (n = 0; n < ss->dimen; n++)
6601 if (ss->dim[n] != n)
6602 return true;
6603 return false;
6604 }
6605
6606
6607 /* Convert the last ref of a scalar coarray from an AR_ELEMENT to an
6608 AR_SECTION, suitable for the scalarizer. */
6609
6610 static gfc_ss *
6611 walk_coarray (gfc_expr *e)
6612 {
6613 gfc_ss *ss;
6614
6615 gcc_assert (gfc_get_corank (e) > 0);
6616
6617 ss = gfc_walk_expr (e);
6618
6619 /* Fix scalar coarray. */
6620 if (ss == gfc_ss_terminator)
6621 {
6622 gfc_ref *ref;
6623
6624 ref = e->ref;
6625 while (ref)
6626 {
6627 if (ref->type == REF_ARRAY
6628 && ref->u.ar.codimen > 0)
6629 break;
6630
6631 ref = ref->next;
6632 }
6633
6634 gcc_assert (ref != NULL);
6635 if (ref->u.ar.type == AR_ELEMENT)
6636 ref->u.ar.type = AR_SECTION;
6637 ss = gfc_reverse_ss (gfc_walk_array_ref (ss, e, ref));
6638 }
6639
6640 return ss;
6641 }
6642
6643
6644 /* Convert an array for passing as an actual argument. Expressions and
6645 vector subscripts are evaluated and stored in a temporary, which is then
6646 passed. For whole arrays the descriptor is passed. For array sections
6647 a modified copy of the descriptor is passed, but using the original data.
6648
6649 This function is also used for array pointer assignments, and there
6650 are three cases:
6651
6652 - se->want_pointer && !se->direct_byref
6653 EXPR is an actual argument. On exit, se->expr contains a
6654 pointer to the array descriptor.
6655
6656 - !se->want_pointer && !se->direct_byref
6657 EXPR is an actual argument to an intrinsic function or the
6658 left-hand side of a pointer assignment. On exit, se->expr
6659 contains the descriptor for EXPR.
6660
6661 - !se->want_pointer && se->direct_byref
6662 EXPR is the right-hand side of a pointer assignment and
6663 se->expr is the descriptor for the previously-evaluated
6664 left-hand side. The function creates an assignment from
6665 EXPR to se->expr.
6666
6667
6668 The se->force_tmp flag disables the non-copying descriptor optimization
6669 that is used for transpose. It may be used in cases where there is an
6670 alias between the transpose argument and another argument in the same
6671 function call. */
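/* A rough illustration of the cases above (example code is made up): for
   call sub (a(2:n))  with an assumed-shape dummy, the section gets a new
   descriptor and a pointer to it is passed (first case); for  p => a(2:n)
   the descriptor already built for P is se->expr and this function fills
   it in from the section (third case).  */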
6672
6673 void
6674 gfc_conv_expr_descriptor (gfc_se *se, gfc_expr *expr)
6675 {
6676 gfc_ss *ss;
6677 gfc_ss_type ss_type;
6678 gfc_ss_info *ss_info;
6679 gfc_loopinfo loop;
6680 gfc_array_info *info;
6681 int need_tmp;
6682 int n;
6683 tree tmp;
6684 tree desc;
6685 stmtblock_t block;
6686 tree start;
6687 tree offset;
6688 int full;
6689 bool subref_array_target = false;
6690 gfc_expr *arg, *ss_expr;
6691
6692 if (se->want_coarray)
6693 ss = walk_coarray (expr);
6694 else
6695 ss = gfc_walk_expr (expr);
6696
6697 gcc_assert (ss != NULL);
6698 gcc_assert (ss != gfc_ss_terminator);
6699
6700 ss_info = ss->info;
6701 ss_type = ss_info->type;
6702 ss_expr = ss_info->expr;
6703
6704 /* Special case: TRANSPOSE which needs no temporary. */
6705 while (expr->expr_type == EXPR_FUNCTION && expr->value.function.isym
6706 && NULL != (arg = gfc_get_noncopying_intrinsic_argument (expr)))
6707 {
6708 /* This is a call to transpose which has already been handled by the
6709 scalarizer, so that we just need to get its argument's descriptor. */
6710 gcc_assert (expr->value.function.isym->id == GFC_ISYM_TRANSPOSE);
6711 expr = expr->value.function.actual->expr;
6712 }
6713
6714 /* Special case things we know we can pass easily. */
6715 switch (expr->expr_type)
6716 {
6717 case EXPR_VARIABLE:
6718 /* If we have a linear array section, we can pass it directly.
6719 Otherwise we need to copy it into a temporary. */
6720
6721 gcc_assert (ss_type == GFC_SS_SECTION);
6722 gcc_assert (ss_expr == expr);
6723 info = &ss_info->data.array;
6724
6725 /* Get the descriptor for the array. */
6726 gfc_conv_ss_descriptor (&se->pre, ss, 0);
6727 desc = info->descriptor;
6728
6729 subref_array_target = se->direct_byref && is_subref_array (expr);
6730 need_tmp = gfc_ref_needs_temporary_p (expr->ref)
6731 && !subref_array_target;
6732
6733 if (se->force_tmp)
6734 need_tmp = 1;
6735
6736 if (need_tmp)
6737 full = 0;
6738 else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6739 {
6740 /* Create a new descriptor if the array doesn't have one. */
6741 full = 0;
6742 }
6743 else if (info->ref->u.ar.type == AR_FULL || se->descriptor_only)
6744 full = 1;
6745 else if (se->direct_byref)
6746 full = 0;
6747 else
6748 full = gfc_full_array_ref_p (info->ref, NULL);
6749
6750 if (full && !transposed_dims (ss))
6751 {
6752 if (se->direct_byref && !se->byref_noassign)
6753 {
6754 /* Copy the descriptor for pointer assignments. */
6755 gfc_add_modify (&se->pre, se->expr, desc);
6756
6757 /* Add any offsets from subreferences. */
6758 gfc_get_dataptr_offset (&se->pre, se->expr, desc, NULL_TREE,
6759 subref_array_target, expr);
6760 }
6761 else if (se->want_pointer)
6762 {
6763 /* We pass full arrays directly. This means that pointers and
6764 allocatable arrays should also work. */
6765 se->expr = gfc_build_addr_expr (NULL_TREE, desc);
6766 }
6767 else
6768 {
6769 se->expr = desc;
6770 }
6771
6772 if (expr->ts.type == BT_CHARACTER)
6773 se->string_length = gfc_get_expr_charlen (expr);
6774
6775 gfc_free_ss_chain (ss);
6776 return;
6777 }
6778 break;
6779
6780 case EXPR_FUNCTION:
6781 /* A transformational function return value will be a temporary
6782 array descriptor. We still need to go through the scalarizer
6783 to create the descriptor. Elemental functions are handled as
6784 arbitrary expressions, i.e. copy to a temporary. */
6785
6786 if (se->direct_byref)
6787 {
6788 gcc_assert (ss_type == GFC_SS_FUNCTION && ss_expr == expr);
6789
6790 /* For pointer assignments pass the descriptor directly. */
6791 if (se->ss == NULL)
6792 se->ss = ss;
6793 else
6794 gcc_assert (se->ss == ss);
6795 se->expr = gfc_build_addr_expr (NULL_TREE, se->expr);
6796 gfc_conv_expr (se, expr);
6797 gfc_free_ss_chain (ss);
6798 return;
6799 }
6800
6801 if (ss_expr != expr || ss_type != GFC_SS_FUNCTION)
6802 {
6803 if (ss_expr != expr)
6804 /* Elemental function. */
6805 gcc_assert ((expr->value.function.esym != NULL
6806 && expr->value.function.esym->attr.elemental)
6807 || (expr->value.function.isym != NULL
6808 && expr->value.function.isym->elemental)
6809 || gfc_inline_intrinsic_function_p (expr));
6810 else
6811 gcc_assert (ss_type == GFC_SS_INTRINSIC);
6812
6813 need_tmp = 1;
6814 if (expr->ts.type == BT_CHARACTER
6815 && expr->ts.u.cl->length->expr_type != EXPR_CONSTANT)
6816 get_array_charlen (expr, se);
6817
6818 info = NULL;
6819 }
6820 else
6821 {
6822 /* Transformational function. */
6823 info = &ss_info->data.array;
6824 need_tmp = 0;
6825 }
6826 break;
6827
6828 case EXPR_ARRAY:
6829 /* Constant array constructors don't need a temporary. */
6830 if (ss_type == GFC_SS_CONSTRUCTOR
6831 && expr->ts.type != BT_CHARACTER
6832 && gfc_constant_array_constructor_p (expr->value.constructor))
6833 {
6834 need_tmp = 0;
6835 info = &ss_info->data.array;
6836 }
6837 else
6838 {
6839 need_tmp = 1;
6840 info = NULL;
6841 }
6842 break;
6843
6844 default:
6845 /* Something complicated. Copy it into a temporary. */
6846 need_tmp = 1;
6847 info = NULL;
6848 break;
6849 }
6850
6851 /* If we are creating a temporary, we don't need to bother about aliases
6852 anymore. */
6853 if (need_tmp)
6854 se->force_tmp = 0;
6855
6856 gfc_init_loopinfo (&loop);
6857
6858 /* Associate the SS with the loop. */
6859 gfc_add_ss_to_loop (&loop, ss);
6860
6861 /* Tell the scalarizer not to bother creating loop variables, etc. */
6862 if (!need_tmp)
6863 loop.array_parameter = 1;
6864 else
6865 /* The right-hand side of a pointer assignment mustn't use a temporary. */
6866 gcc_assert (!se->direct_byref);
6867
6868 /* Setup the scalarizing loops and bounds. */
6869 gfc_conv_ss_startstride (&loop);
6870
6871 if (need_tmp)
6872 {
6873 if (expr->ts.type == BT_CHARACTER && !expr->ts.u.cl->backend_decl)
6874 get_array_charlen (expr, se);
6875
6876 /* Tell the scalarizer to make a temporary. */
6877 loop.temp_ss = gfc_get_temp_ss (gfc_typenode_for_spec (&expr->ts),
6878 ((expr->ts.type == BT_CHARACTER)
6879 ? expr->ts.u.cl->backend_decl
6880 : NULL),
6881 loop.dimen);
6882
6883 se->string_length = loop.temp_ss->info->string_length;
6884 gcc_assert (loop.temp_ss->dimen == loop.dimen);
6885 gfc_add_ss_to_loop (&loop, loop.temp_ss);
6886 }
6887
6888 gfc_conv_loop_setup (&loop, & expr->where);
6889
6890 if (need_tmp)
6891 {
6892 /* Copy into a temporary and pass that. We don't need to copy the data
6893 back because expressions and vector subscripts must be INTENT_IN. */
6894 /* TODO: Optimize passing function return values. */
6895 gfc_se lse;
6896 gfc_se rse;
6897
6898 /* Start the copying loops. */
6899 gfc_mark_ss_chain_used (loop.temp_ss, 1);
6900 gfc_mark_ss_chain_used (ss, 1);
6901 gfc_start_scalarized_body (&loop, &block);
6902
6903 /* Copy each data element. */
6904 gfc_init_se (&lse, NULL);
6905 gfc_copy_loopinfo_to_se (&lse, &loop);
6906 gfc_init_se (&rse, NULL);
6907 gfc_copy_loopinfo_to_se (&rse, &loop);
6908
6909 lse.ss = loop.temp_ss;
6910 rse.ss = ss;
6911
6912 gfc_conv_scalarized_array_ref (&lse, NULL);
6913 if (expr->ts.type == BT_CHARACTER)
6914 {
6915 gfc_conv_expr (&rse, expr);
6916 if (POINTER_TYPE_P (TREE_TYPE (rse.expr)))
6917 rse.expr = build_fold_indirect_ref_loc (input_location,
6918 rse.expr);
6919 }
6920 else
6921 gfc_conv_expr_val (&rse, expr);
6922
6923 gfc_add_block_to_block (&block, &rse.pre);
6924 gfc_add_block_to_block (&block, &lse.pre);
6925
6926 lse.string_length = rse.string_length;
6927 tmp = gfc_trans_scalar_assign (&lse, &rse, expr->ts,
6928 expr->expr_type == EXPR_VARIABLE
6929 || expr->expr_type == EXPR_ARRAY, false);
6930 gfc_add_expr_to_block (&block, tmp);
6931
6932 /* Finish the copying loops. */
6933 gfc_trans_scalarizing_loops (&loop, &block);
6934
6935 desc = loop.temp_ss->info->data.array.descriptor;
6936 }
6937 else if (expr->expr_type == EXPR_FUNCTION && !transposed_dims (ss))
6938 {
6939 desc = info->descriptor;
6940 se->string_length = ss_info->string_length;
6941 }
6942 else
6943 {
6944 /* We pass sections without copying to a temporary. Make a new
6945 descriptor and point it at the section we want. The loop variable
6946 limits will be the limits of the section.
6947 A function may decide to repack the array to speed up access, but
6948 we're not bothered about that here. */
6949 int dim, ndim, codim;
6950 tree parm;
6951 tree parmtype;
6952 tree stride;
6953 tree from;
6954 tree to;
6955 tree base;
6956 bool onebased = false, rank_remap;
6957
6958 ndim = info->ref ? info->ref->u.ar.dimen : ss->dimen;
6959 rank_remap = ss->dimen < ndim;
6960
6961 if (se->want_coarray)
6962 {
6963 gfc_array_ref *ar = &info->ref->u.ar;
6964
6965 codim = gfc_get_corank (expr);
6966 for (n = 0; n < codim - 1; n++)
6967 {
6968 /* Make sure we are not lost somehow. */
6969 gcc_assert (ar->dimen_type[n + ndim] == DIMEN_THIS_IMAGE);
6970
6971 /* Make sure the call to gfc_conv_section_startstride won't
6972 generate unnecessary code to calculate stride. */
6973 gcc_assert (ar->stride[n + ndim] == NULL);
6974
6975 gfc_conv_section_startstride (&loop.pre, ss, n + ndim);
6976 loop.from[n + loop.dimen] = info->start[n + ndim];
6977 loop.to[n + loop.dimen] = info->end[n + ndim];
6978 }
6979
6980 gcc_assert (n == codim - 1);
6981 evaluate_bound (&loop.pre, info->start, ar->start,
6982 info->descriptor, n + ndim, true,
6983 ar->as->type == AS_DEFERRED);
6984 loop.from[n + loop.dimen] = info->start[n + ndim];
6985 }
6986 else
6987 codim = 0;
6988
6989 /* Set the string_length for a character array. */
6990 if (expr->ts.type == BT_CHARACTER)
6991 se->string_length = gfc_get_expr_charlen (expr);
6992
6993 /* If we have an array section or are assigning, make sure that
6994 the lower bound is 1. References to the full
6995 array should otherwise keep the original bounds. */
6996 if ((!info->ref || info->ref->u.ar.type != AR_FULL) && !se->want_pointer)
6997 for (dim = 0; dim < loop.dimen; dim++)
6998 if (!integer_onep (loop.from[dim]))
6999 {
7000 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7001 gfc_array_index_type, gfc_index_one_node,
7002 loop.from[dim]);
7003 loop.to[dim] = fold_build2_loc (input_location, PLUS_EXPR,
7004 gfc_array_index_type,
7005 loop.to[dim], tmp);
7006 loop.from[dim] = gfc_index_one_node;
7007 }
7008
7009 desc = info->descriptor;
7010 if (se->direct_byref && !se->byref_noassign)
7011 {
7012 /* For pointer assignments we fill in the destination. */
7013 parm = se->expr;
7014 parmtype = TREE_TYPE (parm);
7015 }
7016 else
7017 {
7018 /* Otherwise make a new one. */
7019 parmtype = gfc_get_element_type (TREE_TYPE (desc));
7020 parmtype = gfc_get_array_type_bounds (parmtype, loop.dimen, codim,
7021 loop.from, loop.to, 0,
7022 GFC_ARRAY_UNKNOWN, false);
7023 parm = gfc_create_var (parmtype, "parm");
7024 }
7025
7026 offset = gfc_index_zero_node;
7027
7028 /* The following can be somewhat confusing. We have two
7029 descriptors, a new one and the original array.
7030 {parm, parmtype, dim} refer to the new one.
7031 {desc, type, n, loop} refer to the original, which may be
7032 a descriptorless array.
7033 The bounds of the scalarization are the bounds of the section.
7034 We don't have to worry about numeric overflows when calculating
7035 the offsets because all elements are within the array data. */
7036
7037 /* Set the dtype. */
7038 tmp = gfc_conv_descriptor_dtype (parm);
7039 gfc_add_modify (&loop.pre, tmp, gfc_get_dtype (parmtype));
7040
7041 /* For assignments to a pointer, set the offset to zero only if this is
7042 not the full array. */
7043 if ((se->direct_byref || se->use_offset)
7044 && ((info->ref && info->ref->u.ar.type != AR_FULL)
7045 || (expr->expr_type == EXPR_ARRAY && se->use_offset)))
7046 base = gfc_index_zero_node;
7047 else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
7048 base = gfc_evaluate_now (gfc_conv_array_offset (desc), &loop.pre);
7049 else
7050 base = NULL_TREE;
7051
7052 for (n = 0; n < ndim; n++)
7053 {
7054 stride = gfc_conv_array_stride (desc, n);
7055
7056 /* Work out the offset. */
7057 if (info->ref
7058 && info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
7059 {
7060 gcc_assert (info->subscript[n]
7061 && info->subscript[n]->info->type == GFC_SS_SCALAR);
7062 start = info->subscript[n]->info->data.scalar.value;
7063 }
7064 else
7065 {
7066 /* Evaluate and remember the start of the section. */
7067 start = info->start[n];
7068 stride = gfc_evaluate_now (stride, &loop.pre);
7069 }
7070
7071 tmp = gfc_conv_array_lbound (desc, n);
7072 tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (tmp),
7073 start, tmp);
7074 tmp = fold_build2_loc (input_location, MULT_EXPR, TREE_TYPE (tmp),
7075 tmp, stride);
7076 offset = fold_build2_loc (input_location, PLUS_EXPR, TREE_TYPE (tmp),
7077 offset, tmp);
7078
7079 if (info->ref
7080 && info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
7081 {
7082 /* For elemental dimensions, we only need the offset. */
7083 continue;
7084 }
7085
7086 /* Vector subscripts need copying and are handled elsewhere. */
7087 if (info->ref)
7088 gcc_assert (info->ref->u.ar.dimen_type[n] == DIMEN_RANGE);
7089
7090 /* Look for the corresponding scalarizer dimension: dim. */
7091 for (dim = 0; dim < ndim; dim++)
7092 if (ss->dim[dim] == n)
7093 break;
7094
7095 /* Loop exited early: the DIM being looked for has been found. */
7096 gcc_assert (dim < ndim);
7097
7098 /* Set the new lower bound. */
7099 from = loop.from[dim];
7100 to = loop.to[dim];
7101
7102 onebased = integer_onep (from);
7103 gfc_conv_descriptor_lbound_set (&loop.pre, parm,
7104 gfc_rank_cst[dim], from);
7105
7106 /* Set the new upper bound. */
7107 gfc_conv_descriptor_ubound_set (&loop.pre, parm,
7108 gfc_rank_cst[dim], to);
7109
7110 /* Multiply the stride by the section stride to get the
7111 total stride. */
7112 stride = fold_build2_loc (input_location, MULT_EXPR,
7113 gfc_array_index_type,
7114 stride, info->stride[n]);
7115
7116 if (se->direct_byref
7117 && ((info->ref && info->ref->u.ar.type != AR_FULL)
7118 || (expr->expr_type == EXPR_ARRAY && se->use_offset)))
7119 {
7120 base = fold_build2_loc (input_location, MINUS_EXPR,
7121 TREE_TYPE (base), base, stride);
7122 }
7123 else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)) || se->use_offset)
7124 {
7125 bool toonebased;
7126 tmp = gfc_conv_array_lbound (desc, n);
7127 toonebased = integer_onep (tmp);
7128 // lb(arr) - from (- start + 1)
7129 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7130 TREE_TYPE (base), tmp, from);
7131 if (onebased && toonebased)
7132 {
7133 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7134 TREE_TYPE (base), tmp, start);
7135 tmp = fold_build2_loc (input_location, PLUS_EXPR,
7136 TREE_TYPE (base), tmp,
7137 gfc_index_one_node);
7138 }
7139 tmp = fold_build2_loc (input_location, MULT_EXPR,
7140 TREE_TYPE (base), tmp,
7141 gfc_conv_array_stride (desc, n));
7142 base = fold_build2_loc (input_location, PLUS_EXPR,
7143 TREE_TYPE (base), tmp, base);
7144 }
7145
7146 /* Store the new stride. */
7147 gfc_conv_descriptor_stride_set (&loop.pre, parm,
7148 gfc_rank_cst[dim], stride);
7149 }
7150
7151 for (n = loop.dimen; n < loop.dimen + codim; n++)
7152 {
7153 from = loop.from[n];
7154 to = loop.to[n];
7155 gfc_conv_descriptor_lbound_set (&loop.pre, parm,
7156 gfc_rank_cst[n], from);
7157 if (n < loop.dimen + codim - 1)
7158 gfc_conv_descriptor_ubound_set (&loop.pre, parm,
7159 gfc_rank_cst[n], to);
7160 }
7161
7162 if (se->data_not_needed)
7163 gfc_conv_descriptor_data_set (&loop.pre, parm,
7164 gfc_index_zero_node);
7165 else
7166 /* Point the data pointer at the 1st element in the section. */
7167 gfc_get_dataptr_offset (&loop.pre, parm, desc, offset,
7168 subref_array_target, expr);
7169
7170 /* Force the offset to be -1, when the lower bound of the highest
7171 dimension is one and the symbol is present and is not a
7172 pointer/allocatable or associated. */
7173 if (((se->direct_byref || GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
7174 && !se->data_not_needed)
7175 || (se->use_offset && base != NULL_TREE))
7176 {
7177 /* Set the offset depending on base. */
7178 tmp = rank_remap && !se->direct_byref ?
7179 fold_build2_loc (input_location, PLUS_EXPR,
7180 gfc_array_index_type, base,
7181 offset)
7182 : base;
7183 gfc_conv_descriptor_offset_set (&loop.pre, parm, tmp);
7184 }
7185 else if (onebased && (!rank_remap || se->use_offset)
7186 && expr->symtree
7187 && !(expr->symtree->n.sym && expr->symtree->n.sym->ts.type == BT_CLASS
7188 && !CLASS_DATA (expr->symtree->n.sym)->attr.class_pointer)
7189 && !expr->symtree->n.sym->attr.allocatable
7190 && !expr->symtree->n.sym->attr.pointer
7191 && !expr->symtree->n.sym->attr.host_assoc
7192 && !expr->symtree->n.sym->attr.use_assoc)
7193 {
7194 /* Set the offset to -1. */
7195 mpz_t minus_one;
7196 mpz_init_set_si (minus_one, -1);
7197 tmp = gfc_conv_mpz_to_tree (minus_one, gfc_index_integer_kind);
7198 gfc_conv_descriptor_offset_set (&loop.pre, parm, tmp);
7199 }
7200 else
7201 {
7202 /* Only the callee knows what the correct offset is, so just set
7203 it to zero here. */
7204 gfc_conv_descriptor_offset_set (&loop.pre, parm, gfc_index_zero_node);
7205 }
7206 desc = parm;
7207 }
7208
7209 /* For class arrays, add the class tree into the saved descriptor to
7210 make the _vptr and the like accessible. */
7211 if (expr->expr_type == EXPR_VARIABLE && VAR_P (desc)
7212 && IS_CLASS_ARRAY (expr->symtree->n.sym))
7213 {
7214 gfc_allocate_lang_decl (desc);
7215 GFC_DECL_SAVED_DESCRIPTOR (desc) =
7216 DECL_LANG_SPECIFIC (expr->symtree->n.sym->backend_decl) ?
7217 GFC_DECL_SAVED_DESCRIPTOR (expr->symtree->n.sym->backend_decl)
7218 : expr->symtree->n.sym->backend_decl;
7219 }
7220 if (!se->direct_byref || se->byref_noassign)
7221 {
7222 /* Get a pointer to the new descriptor. */
7223 if (se->want_pointer)
7224 se->expr = gfc_build_addr_expr (NULL_TREE, desc);
7225 else
7226 se->expr = desc;
7227 }
7228
7229 gfc_add_block_to_block (&se->pre, &loop.pre);
7230 gfc_add_block_to_block (&se->post, &loop.post);
7231
7232 /* Cleanup the scalarizer. */
7233 gfc_cleanup_loop (&loop);
7234 }
7235
7236 /* Helper function for gfc_conv_array_parameter if array size needs to be
7237 computed. */
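/* In effect, for the rank-1 descriptor case below this computes
   size_in_bytes = max (ubound - lbound + 1, 0) * element_size;
   higher-rank descriptors go through the library size0 routine instead.  */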
7238
7239 static void
7240 array_parameter_size (tree desc, gfc_expr *expr, tree *size)
7241 {
7242 tree elem;
7243 if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
7244 *size = GFC_TYPE_ARRAY_SIZE (TREE_TYPE (desc));
7245 else if (expr->rank > 1)
7246 *size = build_call_expr_loc (input_location,
7247 gfor_fndecl_size0, 1,
7248 gfc_build_addr_expr (NULL, desc));
7249 else
7250 {
7251 tree ubound = gfc_conv_descriptor_ubound_get (desc, gfc_index_zero_node);
7252 tree lbound = gfc_conv_descriptor_lbound_get (desc, gfc_index_zero_node);
7253
7254 *size = fold_build2_loc (input_location, MINUS_EXPR,
7255 gfc_array_index_type, ubound, lbound);
7256 *size = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
7257 *size, gfc_index_one_node);
7258 *size = fold_build2_loc (input_location, MAX_EXPR, gfc_array_index_type,
7259 *size, gfc_index_zero_node);
7260 }
7261 elem = TYPE_SIZE_UNIT (gfc_get_element_type (TREE_TYPE (desc)));
7262 *size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
7263 *size, fold_convert (gfc_array_index_type, elem));
7264 }
7265
7266 /* Convert an array for passing as an actual parameter. */
7267 /* TODO: Optimize passing g77 arrays. */
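/* An informal sketch of the common outcomes (example code is made up, and
   not exhaustive):

     call f (a)          ! whole contiguous array: pass its data pointer
     call f (a(1:n:2))   ! strided section to a descriptorless dummy:
                         ! pack into a temporary and unpack afterwards
     call f (a(2:n))     ! contiguous section: passed without packing  */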
7268
7269 void
7270 gfc_conv_array_parameter (gfc_se * se, gfc_expr * expr, bool g77,
7271 const gfc_symbol *fsym, const char *proc_name,
7272 tree *size)
7273 {
7274 tree ptr;
7275 tree desc;
7276 tree tmp = NULL_TREE;
7277 tree stmt;
7278 tree parent = DECL_CONTEXT (current_function_decl);
7279 bool full_array_var;
7280 bool this_array_result;
7281 bool contiguous;
7282 bool no_pack;
7283 bool array_constructor;
7284 bool good_allocatable;
7285 bool ultimate_ptr_comp;
7286 bool ultimate_alloc_comp;
7287 gfc_symbol *sym;
7288 stmtblock_t block;
7289 gfc_ref *ref;
7290
7291 ultimate_ptr_comp = false;
7292 ultimate_alloc_comp = false;
7293
7294 for (ref = expr->ref; ref; ref = ref->next)
7295 {
7296 if (ref->next == NULL)
7297 break;
7298
7299 if (ref->type == REF_COMPONENT)
7300 {
7301 ultimate_ptr_comp = ref->u.c.component->attr.pointer;
7302 ultimate_alloc_comp = ref->u.c.component->attr.allocatable;
7303 }
7304 }
7305
7306 full_array_var = false;
7307 contiguous = false;
7308
7309 if (expr->expr_type == EXPR_VARIABLE && ref && !ultimate_ptr_comp)
7310 full_array_var = gfc_full_array_ref_p (ref, &contiguous);
7311
7312 sym = full_array_var ? expr->symtree->n.sym : NULL;
7313
7314 /* The symbol should have an array specification. */
7315 gcc_assert (!sym || sym->as || ref->u.ar.as);
7316
7317 if (expr->expr_type == EXPR_ARRAY && expr->ts.type == BT_CHARACTER)
7318 {
7319 get_array_ctor_strlen (&se->pre, expr->value.constructor, &tmp);
7320 expr->ts.u.cl->backend_decl = tmp;
7321 se->string_length = tmp;
7322 }
7323
7324 /* Is this the result of the enclosing procedure? */
7325 this_array_result = (full_array_var && sym->attr.flavor == FL_PROCEDURE);
7326 if (this_array_result
7327 && (sym->backend_decl != current_function_decl)
7328 && (sym->backend_decl != parent))
7329 this_array_result = false;
7330
7331 /* Pass the address of the array if it is not a pointer or assumed-shape. */
7332 if (full_array_var && g77 && !this_array_result
7333 && sym->ts.type != BT_DERIVED && sym->ts.type != BT_CLASS)
7334 {
7335 tmp = gfc_get_symbol_decl (sym);
7336
7337 if (sym->ts.type == BT_CHARACTER)
7338 se->string_length = sym->ts.u.cl->backend_decl;
7339
7340 if (!sym->attr.pointer
7341 && sym->as
7342 && sym->as->type != AS_ASSUMED_SHAPE
7343 && sym->as->type != AS_DEFERRED
7344 && sym->as->type != AS_ASSUMED_RANK
7345 && !sym->attr.allocatable)
7346 {
7347 /* Some variables are declared directly, others are declared as
7348 pointers and allocated on the heap. */
7349 if (sym->attr.dummy || POINTER_TYPE_P (TREE_TYPE (tmp)))
7350 se->expr = tmp;
7351 else
7352 se->expr = gfc_build_addr_expr (NULL_TREE, tmp);
7353 if (size)
7354 array_parameter_size (tmp, expr, size);
7355 return;
7356 }
7357
7358 if (sym->attr.allocatable)
7359 {
7360 if (sym->attr.dummy || sym->attr.result)
7361 {
7362 gfc_conv_expr_descriptor (se, expr);
7363 tmp = se->expr;
7364 }
7365 if (size)
7366 array_parameter_size (tmp, expr, size);
7367 se->expr = gfc_conv_array_data (tmp);
7368 return;
7369 }
7370 }
7371
7372 /* A convenient reduction in scope. */
7373 contiguous = g77 && !this_array_result && contiguous;
7374
7375 /* There is no need to pack and unpack the array, if it is contiguous
7376 and not a deferred- or assumed-shape array, or if it is simply
7377 contiguous. */
7378 no_pack = ((sym && sym->as
7379 && !sym->attr.pointer
7380 && sym->as->type != AS_DEFERRED
7381 && sym->as->type != AS_ASSUMED_RANK
7382 && sym->as->type != AS_ASSUMED_SHAPE)
7383 ||
7384 (ref && ref->u.ar.as
7385 && ref->u.ar.as->type != AS_DEFERRED
7386 && ref->u.ar.as->type != AS_ASSUMED_RANK
7387 && ref->u.ar.as->type != AS_ASSUMED_SHAPE)
7388 ||
7389 gfc_is_simply_contiguous (expr, false, true));
7390
7391 no_pack = contiguous && no_pack;
7392
7393 /* Array constructors are always contiguous and do not need packing. */
7394 array_constructor = g77 && !this_array_result && expr->expr_type == EXPR_ARRAY;
7395
7396 /* The same is true of contiguous sections from allocatable variables. */
7397 good_allocatable = contiguous
7398 && expr->symtree
7399 && expr->symtree->n.sym->attr.allocatable;
7400
7401 /* Or ultimate allocatable components. */
7402 ultimate_alloc_comp = contiguous && ultimate_alloc_comp;
7403
7404 if (no_pack || array_constructor || good_allocatable || ultimate_alloc_comp)
7405 {
7406 gfc_conv_expr_descriptor (se, expr);
7407 /* Deallocate the allocatable components of structures that are
7408 not variable. */
7409 if ((expr->ts.type == BT_DERIVED || expr->ts.type == BT_CLASS)
7410 && expr->ts.u.derived->attr.alloc_comp
7411 && expr->expr_type != EXPR_VARIABLE)
7412 {
7413 tmp = gfc_deallocate_alloc_comp (expr->ts.u.derived, se->expr, expr->rank);
7414
7415 /* The components shall be deallocated before their containing entity. */
7416 gfc_prepend_expr_to_block (&se->post, tmp);
7417 }
7418 if (expr->ts.type == BT_CHARACTER)
7419 se->string_length = expr->ts.u.cl->backend_decl;
7420 if (size)
7421 array_parameter_size (se->expr, expr, size);
7422 se->expr = gfc_conv_array_data (se->expr);
7423 return;
7424 }
7425
7426 if (this_array_result)
7427 {
7428 /* Result of the enclosing function. */
7429 gfc_conv_expr_descriptor (se, expr);
7430 if (size)
7431 array_parameter_size (se->expr, expr, size);
7432 se->expr = gfc_build_addr_expr (NULL_TREE, se->expr);
7433
7434 if (g77 && TREE_TYPE (TREE_TYPE (se->expr)) != NULL_TREE
7435 && GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (TREE_TYPE (se->expr))))
7436 se->expr = gfc_conv_array_data (build_fold_indirect_ref_loc (input_location,
7437 se->expr));
7438
7439 return;
7440 }
7441 else
7442 {
7443 /* Every other type of array. */
7444 se->want_pointer = 1;
7445 gfc_conv_expr_descriptor (se, expr);
7446 if (size)
7447 array_parameter_size (build_fold_indirect_ref_loc (input_location,
7448 se->expr),
7449 expr, size);
7450 }
7451
7452 /* Deallocate the allocatable components of structures that are
7453 not variable, for descriptorless arguments.
7454 Arguments with a descriptor are handled in gfc_conv_procedure_call. */
7455 if (g77 && (expr->ts.type == BT_DERIVED || expr->ts.type == BT_CLASS)
7456 && expr->ts.u.derived->attr.alloc_comp
7457 && expr->expr_type != EXPR_VARIABLE)
7458 {
7459 tmp = build_fold_indirect_ref_loc (input_location, se->expr);
7460 tmp = gfc_deallocate_alloc_comp (expr->ts.u.derived, tmp, expr->rank);
7461
7462 /* The components shall be deallocated before their containing entity. */
7463 gfc_prepend_expr_to_block (&se->post, tmp);
7464 }
7465
7466 if (g77 || (fsym && fsym->attr.contiguous
7467 && !gfc_is_simply_contiguous (expr, false, true)))
7468 {
7469 tree origptr = NULL_TREE;
7470
7471 desc = se->expr;
7472
7473 /* For contiguous arrays, save the original value of the descriptor. */
7474 if (!g77)
7475 {
7476 origptr = gfc_create_var (pvoid_type_node, "origptr");
7477 tmp = build_fold_indirect_ref_loc (input_location, desc);
7478 tmp = gfc_conv_array_data (tmp);
7479 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
7480 TREE_TYPE (origptr), origptr,
7481 fold_convert (TREE_TYPE (origptr), tmp));
7482 gfc_add_expr_to_block (&se->pre, tmp);
7483 }
7484
7485 /* Repack the array. */
7486 if (warn_array_temporaries)
7487 {
7488 if (fsym)
7489 gfc_warning (OPT_Warray_temporaries,
7490 "Creating array temporary at %L for argument %qs",
7491 &expr->where, fsym->name);
7492 else
7493 gfc_warning (OPT_Warray_temporaries,
7494 "Creating array temporary at %L", &expr->where);
7495 }
7496
7497 ptr = build_call_expr_loc (input_location,
7498 gfor_fndecl_in_pack, 1, desc);
7499
7500 if (fsym && fsym->attr.optional && sym && sym->attr.optional)
7501 {
7502 tmp = gfc_conv_expr_present (sym);
7503 ptr = build3_loc (input_location, COND_EXPR, TREE_TYPE (se->expr),
7504 tmp, fold_convert (TREE_TYPE (se->expr), ptr),
7505 fold_convert (TREE_TYPE (se->expr), null_pointer_node));
7506 }
7507
7508 ptr = gfc_evaluate_now (ptr, &se->pre);
7509
7510 /* Use the packed data for the actual argument, except for contiguous arrays,
7511 where the descriptor's data component is set. */
7512 if (g77)
7513 se->expr = ptr;
7514 else
7515 {
7516 tmp = build_fold_indirect_ref_loc (input_location, desc);
7517
7518 gfc_ss * ss = gfc_walk_expr (expr);
7519 if (!transposed_dims (ss))
7520 gfc_conv_descriptor_data_set (&se->pre, tmp, ptr);
7521 else
7522 {
7523 tree old_field, new_field;
7524
7525 /* The original descriptor has transposed dims so we can't reuse
7526 it directly; we have to create a new one. */
7527 tree old_desc = tmp;
7528 tree new_desc = gfc_create_var (TREE_TYPE (old_desc), "arg_desc");
7529
7530 old_field = gfc_conv_descriptor_dtype (old_desc);
7531 new_field = gfc_conv_descriptor_dtype (new_desc);
7532 gfc_add_modify (&se->pre, new_field, old_field);
7533
7534 old_field = gfc_conv_descriptor_offset (old_desc);
7535 new_field = gfc_conv_descriptor_offset (new_desc);
7536 gfc_add_modify (&se->pre, new_field, old_field);
7537
7538 for (int i = 0; i < expr->rank; i++)
7539 {
7540 old_field = gfc_conv_descriptor_dimension (old_desc,
7541 gfc_rank_cst[get_array_ref_dim_for_loop_dim (ss, i)]);
7542 new_field = gfc_conv_descriptor_dimension (new_desc,
7543 gfc_rank_cst[i]);
7544 gfc_add_modify (&se->pre, new_field, old_field);
7545 }
7546
7547 if (flag_coarray == GFC_FCOARRAY_LIB
7548 && GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (old_desc))
7549 && GFC_TYPE_ARRAY_AKIND (TREE_TYPE (old_desc))
7550 == GFC_ARRAY_ALLOCATABLE)
7551 {
7552 old_field = gfc_conv_descriptor_token (old_desc);
7553 new_field = gfc_conv_descriptor_token (new_desc);
7554 gfc_add_modify (&se->pre, new_field, old_field);
7555 }
7556
7557 gfc_conv_descriptor_data_set (&se->pre, new_desc, ptr);
7558 se->expr = gfc_build_addr_expr (NULL_TREE, new_desc);
7559 }
7560 gfc_free_ss (ss);
7561 }
7562
7563 if (gfc_option.rtcheck & GFC_RTCHECK_ARRAY_TEMPS)
7564 {
7565 char * msg;
7566
7567 if (fsym && proc_name)
7568 msg = xasprintf ("An array temporary was created for argument "
7569 "'%s' of procedure '%s'", fsym->name, proc_name);
7570 else
7571 msg = xasprintf ("An array temporary was created");
7572
7573 tmp = build_fold_indirect_ref_loc (input_location,
7574 desc);
7575 tmp = gfc_conv_array_data (tmp);
7576 tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
7577 fold_convert (TREE_TYPE (tmp), ptr), tmp);
7578
7579 if (fsym && fsym->attr.optional && sym && sym->attr.optional)
7580 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
7581 boolean_type_node,
7582 gfc_conv_expr_present (sym), tmp);
7583
7584 gfc_trans_runtime_check (false, true, tmp, &se->pre,
7585 &expr->where, msg);
7586 free (msg);
7587 }
7588
7589 gfc_start_block (&block);
7590
7591 /* Copy the data back. */
7592 if (fsym == NULL || fsym->attr.intent != INTENT_IN)
7593 {
7594 tmp = build_call_expr_loc (input_location,
7595 gfor_fndecl_in_unpack, 2, desc, ptr);
7596 gfc_add_expr_to_block (&block, tmp);
7597 }
7598
7599 /* Free the temporary. */
7600 tmp = gfc_call_free (ptr);
7601 gfc_add_expr_to_block (&block, tmp);
7602
7603 stmt = gfc_finish_block (&block);
7604
7605 gfc_init_block (&block);
7606 /* Only do this if the array was actually repacked. This code needs to be
7607 executed before the loop cleanup code. */
7608 tmp = build_fold_indirect_ref_loc (input_location,
7609 desc);
7610 tmp = gfc_conv_array_data (tmp);
7611 tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
7612 fold_convert (TREE_TYPE (tmp), ptr), tmp);
7613
7614 if (fsym && fsym->attr.optional && sym && sym->attr.optional)
7615 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
7616 boolean_type_node,
7617 gfc_conv_expr_present (sym), tmp);
7618
7619 tmp = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt (input_location));
7620
7621 gfc_add_expr_to_block (&block, tmp);
7622 gfc_add_block_to_block (&block, &se->post);
7623
7624 gfc_init_block (&se->post);
7625
7626 /* Reset the descriptor pointer. */
7627 if (!g77)
7628 {
7629 tmp = build_fold_indirect_ref_loc (input_location, desc);
7630 gfc_conv_descriptor_data_set (&se->post, tmp, origptr);
7631 }
7632
7633 gfc_add_block_to_block (&se->post, &block);
7634 }
7635 }
7636
7637
7638 /* Generate code to deallocate an array, if it is allocated. */
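/* A rough sketch (added note, simplified) of what the block built here
   amounts to for a plain, non-coarray allocatable array:

       if (desc.data != NULL)
         free (desc.data);     /* emitted by gfc_deallocate_with_status */
       desc.data = NULL;                                                   */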
7639
7640 tree
7641 gfc_trans_dealloc_allocated (tree descriptor, bool coarray, gfc_expr *expr)
7642 {
7643 tree tmp;
7644 tree var;
7645 stmtblock_t block;
7646
7647 gfc_start_block (&block);
7648
7649 var = gfc_conv_descriptor_data_get (descriptor);
7650 STRIP_NOPS (var);
7651
7652 /* Call array_deallocate with an int * present in the second argument.
7653 Although it is ignored here, its presence ensures that arrays that
7654 are already deallocated are ignored. */
7655 tmp = gfc_deallocate_with_status (coarray ? descriptor : var, NULL_TREE,
7656 NULL_TREE, NULL_TREE, NULL_TREE, true,
7657 expr, coarray);
7658 gfc_add_expr_to_block (&block, tmp);
7659
7660 /* Zero the data pointer. */
7661 tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
7662 var, build_int_cst (TREE_TYPE (var), 0));
7663 gfc_add_expr_to_block (&block, tmp);
7664
7665 return gfc_finish_block (&block);
7666 }
7667
7668
7669 /* This helper function calculates the size (number of elements) of a full array. */
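/* Worked example (added note): for a contiguous rank-2 array A(N,M), the
   last dimension has extent M and stride N, so the value computed below is
   (ubound - lbound + 1) * stride = M * N, the total number of elements.  */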
7670
7671 tree
7672 gfc_full_array_size (stmtblock_t *block, tree decl, int rank)
7673 {
7674 tree idx;
7675 tree nelems;
7676 tree tmp;
7677 idx = gfc_rank_cst[rank - 1];
7678 nelems = gfc_conv_descriptor_ubound_get (decl, idx);
7679 tmp = gfc_conv_descriptor_lbound_get (decl, idx);
7680 tmp = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
7681 nelems, tmp);
7682 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
7683 tmp, gfc_index_one_node);
7684 tmp = gfc_evaluate_now (tmp, block);
7685
7686 nelems = gfc_conv_descriptor_stride_get (decl, idx);
7687 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
7688 nelems, tmp);
7689 return gfc_evaluate_now (tmp, block);
7690 }
7691
7692
7693 /* Allocate dest to the same size as src, and copy src -> dest.
7694 If no_malloc is set, only the copy is done. */
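/* A simplified sketch (added note) of the code this builds:

       if (src == NULL)            /* src.data for descriptor arguments    */
         dest = NULL;              /* resp. dest.data = NULL               */
       else
         {
           dest = malloc (size);         /* unless NO_MALLOC               */
           memcpy (dest, src, size);     /* unless NO_MEMCPY               */
           ADD_WHEN_ALLOCATED;           /* e.g. deep copy of components   */
         }                                                                 */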
7695
7696 static tree
7697 duplicate_allocatable (tree dest, tree src, tree type, int rank,
7698 bool no_malloc, bool no_memcpy, tree str_sz,
7699 tree add_when_allocated)
7700 {
7701 tree tmp;
7702 tree size;
7703 tree nelems;
7704 tree null_cond;
7705 tree null_data;
7706 stmtblock_t block;
7707
7708 /* If the source is null, set the destination to null; otherwise,
7709 allocate memory and copy the data to the destination. */
7710 gfc_init_block (&block);
7711
7712 if (!GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (dest)))
7713 {
7714 tmp = null_pointer_node;
7715 tmp = fold_build2_loc (input_location, MODIFY_EXPR, type, dest, tmp);
7716 gfc_add_expr_to_block (&block, tmp);
7717 null_data = gfc_finish_block (&block);
7718
7719 gfc_init_block (&block);
7720 if (str_sz != NULL_TREE)
7721 size = str_sz;
7722 else
7723 size = TYPE_SIZE_UNIT (TREE_TYPE (type));
7724
7725 if (!no_malloc)
7726 {
7727 tmp = gfc_call_malloc (&block, type, size);
7728 tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
7729 dest, fold_convert (type, tmp));
7730 gfc_add_expr_to_block (&block, tmp);
7731 }
7732
7733 if (!no_memcpy)
7734 {
7735 tmp = builtin_decl_explicit (BUILT_IN_MEMCPY);
7736 tmp = build_call_expr_loc (input_location, tmp, 3, dest, src,
7737 fold_convert (size_type_node, size));
7738 gfc_add_expr_to_block (&block, tmp);
7739 }
7740 }
7741 else
7742 {
7743 gfc_conv_descriptor_data_set (&block, dest, null_pointer_node);
7744 null_data = gfc_finish_block (&block);
7745
7746 gfc_init_block (&block);
7747 if (rank)
7748 nelems = gfc_full_array_size (&block, src, rank);
7749 else
7750 nelems = gfc_index_one_node;
7751
7752 if (str_sz != NULL_TREE)
7753 tmp = fold_convert (gfc_array_index_type, str_sz);
7754 else
7755 tmp = fold_convert (gfc_array_index_type,
7756 TYPE_SIZE_UNIT (gfc_get_element_type (type)));
7757 size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
7758 nelems, tmp);
7759 if (!no_malloc)
7760 {
7761 tmp = TREE_TYPE (gfc_conv_descriptor_data_get (src));
7762 tmp = gfc_call_malloc (&block, tmp, size);
7763 gfc_conv_descriptor_data_set (&block, dest, tmp);
7764 }
7765
7766 /* We know the temporary and the value will be the same length,
7767 so we can use memcpy. */
7768 if (!no_memcpy)
7769 {
7770 tmp = builtin_decl_explicit (BUILT_IN_MEMCPY);
7771 tmp = build_call_expr_loc (input_location, tmp, 3,
7772 gfc_conv_descriptor_data_get (dest),
7773 gfc_conv_descriptor_data_get (src),
7774 fold_convert (size_type_node, size));
7775 gfc_add_expr_to_block (&block, tmp);
7776 }
7777 }
7778
7779 gfc_add_expr_to_block (&block, add_when_allocated);
7780 tmp = gfc_finish_block (&block);
7781
7782 /* Null the destination if the source is null; otherwise do
7783 the allocate and copy. */
7784 if (!GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (src)))
7785 null_cond = src;
7786 else
7787 null_cond = gfc_conv_descriptor_data_get (src);
7788
7789 null_cond = convert (pvoid_type_node, null_cond);
7790 null_cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
7791 null_cond, null_pointer_node);
7792 return build3_v (COND_EXPR, null_cond, tmp, null_data);
7793 }
7794
7795
7796 /* Allocate dest to the same size as src, and copy data src -> dest. */
7797
7798 tree
7799 gfc_duplicate_allocatable (tree dest, tree src, tree type, int rank,
7800 tree add_when_allocated)
7801 {
7802 return duplicate_allocatable (dest, src, type, rank, false, false,
7803 NULL_TREE, add_when_allocated);
7804 }
7805
7806
7807 /* Copy data src -> dest. */
7808
7809 tree
7810 gfc_copy_allocatable_data (tree dest, tree src, tree type, int rank)
7811 {
7812 return duplicate_allocatable (dest, src, type, rank, true, false,
7813 NULL_TREE, NULL_TREE);
7814 }
7815
7816 /* Allocate dest to the same size as src, but don't copy anything. */
7817
7818 tree
7819 gfc_duplicate_allocatable_nocopy (tree dest, tree src, tree type, int rank)
7820 {
7821 return duplicate_allocatable (dest, src, type, rank, false, true,
7822 NULL_TREE, NULL_TREE);
7823 }
7824
7825
7826 /* Recursively traverse an object of derived type, generating code to
7827 deallocate, nullify or copy allocatable components. This is the workhorse
7828 function for the functions named in this enum. */
7829
7830 enum {DEALLOCATE_ALLOC_COMP = 1, DEALLOCATE_ALLOC_COMP_NO_CAF,
7831 NULLIFY_ALLOC_COMP, COPY_ALLOC_COMP, COPY_ONLY_ALLOC_COMP,
7832 COPY_ALLOC_COMP_CAF};
7833
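/* An example (added for illustration) of a type these walkers are generated
   for:

       type :: t
         integer,     allocatable :: a(:)   ! nullified/deallocated/copied
         type(inner), allocatable :: nest   ! recursed into when INNER
       end type t                           ! itself has such components  */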
7834 static tree
7835 structure_alloc_comps (gfc_symbol * der_type, tree decl,
7836 tree dest, int rank, int purpose)
7837 {
7838 gfc_component *c;
7839 gfc_loopinfo loop;
7840 stmtblock_t fnblock;
7841 stmtblock_t loopbody;
7842 stmtblock_t tmpblock;
7843 tree decl_type;
7844 tree tmp;
7845 tree comp;
7846 tree dcmp;
7847 tree nelems;
7848 tree index;
7849 tree var;
7850 tree cdecl;
7851 tree ctype;
7852 tree vref, dref;
7853 tree null_cond = NULL_TREE;
7854 tree add_when_allocated;
7855 bool called_dealloc_with_status;
7856
7857 gfc_init_block (&fnblock);
7858
7859 decl_type = TREE_TYPE (decl);
7860
7861 if ((POINTER_TYPE_P (decl_type))
7862 || (TREE_CODE (decl_type) == REFERENCE_TYPE && rank == 0))
7863 {
7864 decl = build_fold_indirect_ref_loc (input_location, decl);
7865 /* Deref dest in sync with decl, but only when it is not NULL. */
7866 if (dest)
7867 dest = build_fold_indirect_ref_loc (input_location, dest);
7868 }
7869
7870 /* Just in case it gets dereferenced. */
7871 decl_type = TREE_TYPE (decl);
7872
7873 /* If this is an array of derived types with allocatable components,
7874 build a loop and recursively call this function. */
7875 if (TREE_CODE (decl_type) == ARRAY_TYPE
7876 || (GFC_DESCRIPTOR_TYPE_P (decl_type) && rank != 0))
7877 {
7878 tmp = gfc_conv_array_data (decl);
7879 var = build_fold_indirect_ref_loc (input_location, tmp);
7880
7881 /* Get the number of elements - 1 and set the counter. */
7882 if (GFC_DESCRIPTOR_TYPE_P (decl_type))
7883 {
7884 /* Use the descriptor for an allocatable array. Since this
7885 is a full array reference, we only need the descriptor
7886 information from dimension = rank. */
7887 tmp = gfc_full_array_size (&fnblock, decl, rank);
7888 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7889 gfc_array_index_type, tmp,
7890 gfc_index_one_node);
7891
7892 null_cond = gfc_conv_descriptor_data_get (decl);
7893 null_cond = fold_build2_loc (input_location, NE_EXPR,
7894 boolean_type_node, null_cond,
7895 build_int_cst (TREE_TYPE (null_cond), 0));
7896 }
7897 else
7898 {
7899 /* Otherwise use the TYPE_DOMAIN information. */
7900 tmp = array_type_nelts (decl_type);
7901 tmp = fold_convert (gfc_array_index_type, tmp);
7902 }
7903
7904 /* Remember that this is, in fact, the no. of elements - 1. */
7905 nelems = gfc_evaluate_now (tmp, &fnblock);
7906 index = gfc_create_var (gfc_array_index_type, "S");
7907
7908 /* Build the body of the loop. */
7909 gfc_init_block (&loopbody);
7910
7911 vref = gfc_build_array_ref (var, index, NULL);
7912
7913 if (purpose == COPY_ALLOC_COMP || purpose == COPY_ONLY_ALLOC_COMP)
7914 {
7915 tmp = build_fold_indirect_ref_loc (input_location,
7916 gfc_conv_array_data (dest));
7917 dref = gfc_build_array_ref (tmp, index, NULL);
7918 tmp = structure_alloc_comps (der_type, vref, dref, rank,
7919 COPY_ALLOC_COMP);
7920 }
7921 else
7922 tmp = structure_alloc_comps (der_type, vref, NULL_TREE, rank, purpose);
7923
7924 gfc_add_expr_to_block (&loopbody, tmp);
7925
7926 /* Build the loop and return. */
7927 gfc_init_loopinfo (&loop);
7928 loop.dimen = 1;
7929 loop.from[0] = gfc_index_zero_node;
7930 loop.loopvar[0] = index;
7931 loop.to[0] = nelems;
7932 gfc_trans_scalarizing_loops (&loop, &loopbody);
7933 gfc_add_block_to_block (&fnblock, &loop.pre);
7934
7935 tmp = gfc_finish_block (&fnblock);
7936 /* When copying allocatable components, the above implements the
7937 deep copy. Nevertheless, a deep copy is only allowed when the current
7938 component is allocated; the code checking for that is generated in
7939 gfc_duplicate_allocatable (), where the deep copy code is simply added
7940 into the body of the if by passing tmp (the deep copy code) as the last
7941 argument to gfc_duplicate_allocatable (). */
7942 if (purpose == COPY_ALLOC_COMP
7943 && GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (dest)))
7944 tmp = gfc_duplicate_allocatable (dest, decl, decl_type, rank,
7945 tmp);
7946 else if (null_cond != NULL_TREE)
7947 tmp = build3_v (COND_EXPR, null_cond, tmp,
7948 build_empty_stmt (input_location));
7949
7950 return tmp;
7951 }
7952
7953 /* Otherwise, act on the components or recursively call self to
7954 act on a chain of components. */
7955 for (c = der_type->components; c; c = c->next)
7956 {
7957 bool cmp_has_alloc_comps = (c->ts.type == BT_DERIVED
7958 || c->ts.type == BT_CLASS)
7959 && c->ts.u.derived->attr.alloc_comp;
7960 cdecl = c->backend_decl;
7961 ctype = TREE_TYPE (cdecl);
7962
7963 switch (purpose)
7964 {
7965 case DEALLOCATE_ALLOC_COMP:
7966 case DEALLOCATE_ALLOC_COMP_NO_CAF:
7967
7968 /* gfc_deallocate_scalar_with_status calls gfc_deallocate_alloc_comp
7969 (i.e. this function) so generate all the calls and suppress the
7970 recursion from here, if necessary. */
7971 called_dealloc_with_status = false;
7972 gfc_init_block (&tmpblock);
7973
7974 if ((c->ts.type == BT_DERIVED && !c->attr.pointer)
7975 || (c->ts.type == BT_CLASS && !CLASS_DATA (c)->attr.class_pointer))
7976 {
7977 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7978 decl, cdecl, NULL_TREE);
7979
7980 /* The finalizer frees allocatable components. */
7981 called_dealloc_with_status
7982 = gfc_add_comp_finalizer_call (&tmpblock, comp, c,
7983 purpose == DEALLOCATE_ALLOC_COMP);
7984 }
7985 else
7986 comp = NULL_TREE;
7987
7988 if (c->attr.allocatable && !c->attr.proc_pointer
7989 && (c->attr.dimension
7990 || (c->attr.codimension
7991 && purpose != DEALLOCATE_ALLOC_COMP_NO_CAF)))
7992 {
7993 if (comp == NULL_TREE)
7994 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7995 decl, cdecl, NULL_TREE);
7996 tmp = gfc_trans_dealloc_allocated (comp, c->attr.codimension, NULL);
7997 gfc_add_expr_to_block (&tmpblock, tmp);
7998 }
7999 else if (c->attr.allocatable && !c->attr.codimension)
8000 {
8001 /* Allocatable scalar components. */
8002 if (comp == NULL_TREE)
8003 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
8004 decl, cdecl, NULL_TREE);
8005
8006 tmp = gfc_deallocate_scalar_with_status (comp, NULL, true, NULL,
8007 c->ts);
8008 gfc_add_expr_to_block (&tmpblock, tmp);
8009 called_dealloc_with_status = true;
8010
8011 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
8012 void_type_node, comp,
8013 build_int_cst (TREE_TYPE (comp), 0));
8014 gfc_add_expr_to_block (&tmpblock, tmp);
8015 }
8016 else if (c->ts.type == BT_CLASS && CLASS_DATA (c)->attr.allocatable
8017 && (!CLASS_DATA (c)->attr.codimension
8018 || purpose != DEALLOCATE_ALLOC_COMP_NO_CAF))
8019 {
8020 /* Allocatable CLASS components. */
8021
8022 /* Add reference to '_data' component. */
8023 tmp = CLASS_DATA (c)->backend_decl;
8024 comp = fold_build3_loc (input_location, COMPONENT_REF,
8025 TREE_TYPE (tmp), comp, tmp, NULL_TREE);
8026
8027 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (comp)))
8028 tmp = gfc_trans_dealloc_allocated (comp,
8029 CLASS_DATA (c)->attr.codimension, NULL);
8030 else
8031 {
8032 tmp = gfc_deallocate_scalar_with_status (comp, NULL_TREE, true, NULL,
8033 CLASS_DATA (c)->ts);
8034 gfc_add_expr_to_block (&tmpblock, tmp);
8035 called_dealloc_with_status = true;
8036
8037 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
8038 void_type_node, comp,
8039 build_int_cst (TREE_TYPE (comp), 0));
8040 }
8041 gfc_add_expr_to_block (&tmpblock, tmp);
8042
8043 /* Finally, reset the vptr to the declared type vtable and, if
8044 necessary, reset the _len field.
8045
8046 First recover the reference to the component and obtain
8047 the vptr. */
8048 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
8049 decl, cdecl, NULL_TREE);
8050 tmp = gfc_class_vptr_get (comp);
8051
8052 if (UNLIMITED_POLY (c))
8053 {
8054 /* Both vptr and _len field should be nulled. */
8055 gfc_add_modify (&tmpblock, tmp,
8056 build_int_cst (TREE_TYPE (tmp), 0));
8057 tmp = gfc_class_len_get (comp);
8058 gfc_add_modify (&tmpblock, tmp,
8059 build_int_cst (TREE_TYPE (tmp), 0));
8060 }
8061 else
8062 {
8063 /* Build the vtable address and set the vptr with it. */
8064 tree vtab;
8065 gfc_symbol *vtable;
8066 vtable = gfc_find_derived_vtab (c->ts.u.derived);
8067 vtab = vtable->backend_decl;
8068 if (vtab == NULL_TREE)
8069 vtab = gfc_get_symbol_decl (vtable);
8070 vtab = gfc_build_addr_expr (NULL, vtab);
8071 vtab = fold_convert (TREE_TYPE (tmp), vtab);
8072 gfc_add_modify (&tmpblock, tmp, vtab);
8073 }
8074 }
8075
8076 if (cmp_has_alloc_comps
8077 && !c->attr.pointer
8078 && !called_dealloc_with_status)
8079 {
8080 /* Do not deallocate the components of ultimate pointer
8081 components, and do not call self recursively if a call has
8082 already been made to gfc_trans_dealloc_allocated. */
8083 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
8084 decl, cdecl, NULL_TREE);
8085 rank = c->as ? c->as->rank : 0;
8086 tmp = structure_alloc_comps (c->ts.u.derived, comp, NULL_TREE,
8087 rank, purpose);
8088 gfc_add_expr_to_block (&fnblock, tmp);
8089 }
8090
8091 /* Now add the deallocation of this component. */
8092 gfc_add_block_to_block (&fnblock, &tmpblock);
8093 break;
8094
8095 case NULLIFY_ALLOC_COMP:
8096 if (c->attr.pointer || c->attr.proc_pointer)
8097 continue;
8098 else if (c->attr.allocatable
8099 && (c->attr.dimension || c->attr.codimension))
8100 {
8101 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
8102 decl, cdecl, NULL_TREE);
8103 gfc_conv_descriptor_data_set (&fnblock, comp, null_pointer_node);
8104 }
8105 else if (c->attr.allocatable)
8106 {
8107 /* Allocatable scalar components. */
8108 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
8109 decl, cdecl, NULL_TREE);
8110 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
8111 void_type_node, comp,
8112 build_int_cst (TREE_TYPE (comp), 0));
8113 gfc_add_expr_to_block (&fnblock, tmp);
8114 if (gfc_deferred_strlen (c, &comp))
8115 {
8116 comp = fold_build3_loc (input_location, COMPONENT_REF,
8117 TREE_TYPE (comp),
8118 decl, comp, NULL_TREE);
8119 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
8120 TREE_TYPE (comp), comp,
8121 build_int_cst (TREE_TYPE (comp), 0));
8122 gfc_add_expr_to_block (&fnblock, tmp);
8123 }
8124 }
8125 else if (c->ts.type == BT_CLASS && CLASS_DATA (c)->attr.allocatable)
8126 {
8127 /* Allocatable CLASS components. */
8128 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
8129 decl, cdecl, NULL_TREE);
8130 /* Add reference to '_data' component. */
8131 tmp = CLASS_DATA (c)->backend_decl;
8132 comp = fold_build3_loc (input_location, COMPONENT_REF,
8133 TREE_TYPE (tmp), comp, tmp, NULL_TREE);
8134 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (comp)))
8135 gfc_conv_descriptor_data_set (&fnblock, comp, null_pointer_node);
8136 else
8137 {
8138 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
8139 void_type_node, comp,
8140 build_int_cst (TREE_TYPE (comp), 0));
8141 gfc_add_expr_to_block (&fnblock, tmp);
8142 }
8143 }
8144 else if (cmp_has_alloc_comps)
8145 {
8146 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
8147 decl, cdecl, NULL_TREE);
8148 rank = c->as ? c->as->rank : 0;
8149 tmp = structure_alloc_comps (c->ts.u.derived, comp, NULL_TREE,
8150 rank, purpose);
8151 gfc_add_expr_to_block (&fnblock, tmp);
8152 }
8153 break;
8154
8155 case COPY_ALLOC_COMP_CAF:
8156 if (!c->attr.codimension
8157 && (c->ts.type != BT_CLASS || CLASS_DATA (c)->attr.coarray_comp)
8158 && (c->ts.type != BT_DERIVED
8159 || !c->ts.u.derived->attr.coarray_comp))
8160 continue;
8161
8162 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype, decl,
8163 cdecl, NULL_TREE);
8164 dcmp = fold_build3_loc (input_location, COMPONENT_REF, ctype, dest,
8165 cdecl, NULL_TREE);
8166
8167 if (c->attr.codimension)
8168 {
8169 if (c->ts.type == BT_CLASS)
8170 {
8171 comp = gfc_class_data_get (comp);
8172 dcmp = gfc_class_data_get (dcmp);
8173 }
8174 gfc_conv_descriptor_data_set (&fnblock, dcmp,
8175 gfc_conv_descriptor_data_get (comp));
8176 }
8177 else
8178 {
8179 tmp = structure_alloc_comps (c->ts.u.derived, comp, dcmp,
8180 rank, purpose);
8181 gfc_add_expr_to_block (&fnblock, tmp);
8182
8183 }
8184 break;
8185
8186 case COPY_ALLOC_COMP:
8187 if (c->attr.pointer)
8188 continue;
8189
8190 /* We need source and destination components. */
8191 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype, decl,
8192 cdecl, NULL_TREE);
8193 dcmp = fold_build3_loc (input_location, COMPONENT_REF, ctype, dest,
8194 cdecl, NULL_TREE);
8195 dcmp = fold_convert (TREE_TYPE (comp), dcmp);
8196
8197 if (c->ts.type == BT_CLASS && CLASS_DATA (c)->attr.allocatable)
8198 {
8199 tree ftn_tree;
8200 tree size;
8201 tree dst_data;
8202 tree src_data;
8203 tree null_data;
8204
8205 dst_data = gfc_class_data_get (dcmp);
8206 src_data = gfc_class_data_get (comp);
8207 size = fold_convert (size_type_node,
8208 gfc_class_vtab_size_get (comp));
8209
8210 if (CLASS_DATA (c)->attr.dimension)
8211 {
8212 nelems = gfc_conv_descriptor_size (src_data,
8213 CLASS_DATA (c)->as->rank);
8214 size = fold_build2_loc (input_location, MULT_EXPR,
8215 size_type_node, size,
8216 fold_convert (size_type_node,
8217 nelems));
8218 }
8219 else
8220 nelems = build_int_cst (size_type_node, 1);
8221
8222 if (CLASS_DATA (c)->attr.dimension
8223 || CLASS_DATA (c)->attr.codimension)
8224 {
8225 src_data = gfc_conv_descriptor_data_get (src_data);
8226 dst_data = gfc_conv_descriptor_data_get (dst_data);
8227 }
8228
8229 gfc_init_block (&tmpblock);
8230
8231 /* Coarray components have to have the same allocation status and
8232 shape/type-parameter/effective-type on the LHS and RHS of an
8233 intrinsic assignment. Hence, we did not deallocate them - and
8234 do not allocate them here. */
8235 if (!CLASS_DATA (c)->attr.codimension)
8236 {
8237 ftn_tree = builtin_decl_explicit (BUILT_IN_MALLOC);
8238 tmp = build_call_expr_loc (input_location, ftn_tree, 1, size);
8239 gfc_add_modify (&tmpblock, dst_data,
8240 fold_convert (TREE_TYPE (dst_data), tmp));
8241 }
8242
8243 tmp = gfc_copy_class_to_class (comp, dcmp, nelems,
8244 UNLIMITED_POLY (c));
8245 gfc_add_expr_to_block (&tmpblock, tmp);
8246 tmp = gfc_finish_block (&tmpblock);
8247
8248 gfc_init_block (&tmpblock);
8249 gfc_add_modify (&tmpblock, dst_data,
8250 fold_convert (TREE_TYPE (dst_data),
8251 null_pointer_node));
8252 null_data = gfc_finish_block (&tmpblock);
8253
8254 null_cond = fold_build2_loc (input_location, NE_EXPR,
8255 boolean_type_node, src_data,
8256 null_pointer_node);
8257
8258 gfc_add_expr_to_block (&fnblock, build3_v (COND_EXPR, null_cond,
8259 tmp, null_data));
8260 continue;
8261 }
8262
8263 /* To implement guarded deep copy, i.e., deep copy only allocatable
8264 components that are really allocated, the deep copy code has to
8265 be generated first and then added to the if-block in
8266 gfc_duplicate_allocatable (). */
8267 if (cmp_has_alloc_comps)
8268 {
8269 rank = c->as ? c->as->rank : 0;
8270 tmp = fold_convert (TREE_TYPE (dcmp), comp);
8271 gfc_add_modify (&fnblock, dcmp, tmp);
8272 add_when_allocated = structure_alloc_comps (c->ts.u.derived,
8273 comp, dcmp,
8274 rank, purpose);
8275 }
8276 else
8277 add_when_allocated = NULL_TREE;
8278
8279 if (gfc_deferred_strlen (c, &tmp))
8280 {
8281 tree len, size;
8282 len = tmp;
8283 tmp = fold_build3_loc (input_location, COMPONENT_REF,
8284 TREE_TYPE (len),
8285 decl, len, NULL_TREE);
8286 len = fold_build3_loc (input_location, COMPONENT_REF,
8287 TREE_TYPE (len),
8288 dest, len, NULL_TREE);
8289 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
8290 TREE_TYPE (len), len, tmp);
8291 gfc_add_expr_to_block (&fnblock, tmp);
8292 size = size_of_string_in_bytes (c->ts.kind, len);
8293 /* This component cannot have allocatable components,
8294 therefore the add_when_allocated argument of duplicate_allocatable ()
8295 is always NULL. */
8296 tmp = duplicate_allocatable (dcmp, comp, ctype, rank,
8297 false, false, size, NULL_TREE);
8298 gfc_add_expr_to_block (&fnblock, tmp);
8299 }
8300 else if (c->attr.allocatable && !c->attr.proc_pointer
8301 && (!(cmp_has_alloc_comps && c->as)
8302 || c->attr.codimension))
8303 {
8304 rank = c->as ? c->as->rank : 0;
8305 if (c->attr.codimension)
8306 tmp = gfc_copy_allocatable_data (dcmp, comp, ctype, rank);
8307 else
8308 tmp = gfc_duplicate_allocatable (dcmp, comp, ctype, rank,
8309 add_when_allocated);
8310 gfc_add_expr_to_block (&fnblock, tmp);
8311 }
8312 else
8313 if (cmp_has_alloc_comps)
8314 gfc_add_expr_to_block (&fnblock, add_when_allocated);
8315
8316 break;
8317
8318 default:
8319 gcc_unreachable ();
8320 break;
8321 }
8322 }
8323
8324 return gfc_finish_block (&fnblock);
8325 }
8326
8327 /* Recursively traverse an object of derived type, generating code to
8328 nullify allocatable components. */
8329
8330 tree
8331 gfc_nullify_alloc_comp (gfc_symbol * der_type, tree decl, int rank)
8332 {
8333 return structure_alloc_comps (der_type, decl, NULL_TREE, rank,
8334 NULLIFY_ALLOC_COMP);
8335 }
8336
8337
8338 /* Recursively traverse an object of derived type, generating code to
8339 deallocate allocatable components. */
8340
8341 tree
8342 gfc_deallocate_alloc_comp (gfc_symbol * der_type, tree decl, int rank)
8343 {
8344 return structure_alloc_comps (der_type, decl, NULL_TREE, rank,
8345 DEALLOCATE_ALLOC_COMP);
8346 }
8347
8348
8349 /* Recursively traverse an object of derived type, generating code to
8350 deallocate allocatable components. But do not deallocate coarrays.
8351 To be used for intrinsic assignment, which may not change the allocation
8352 status of coarrays. */
8353
8354 tree
8355 gfc_deallocate_alloc_comp_no_caf (gfc_symbol * der_type, tree decl, int rank)
8356 {
8357 return structure_alloc_comps (der_type, decl, NULL_TREE, rank,
8358 DEALLOCATE_ALLOC_COMP_NO_CAF);
8359 }
8360
8361
8362 tree
8363 gfc_reassign_alloc_comp_caf (gfc_symbol *der_type, tree decl, tree dest)
8364 {
8365 return structure_alloc_comps (der_type, decl, dest, 0, COPY_ALLOC_COMP_CAF);
8366 }
8367
8368
8369 /* Recursively traverse an object of derived type, generating code to
8370 copy it and its allocatable components. */
8371
8372 tree
8373 gfc_copy_alloc_comp (gfc_symbol * der_type, tree decl, tree dest, int rank)
8374 {
8375 return structure_alloc_comps (der_type, decl, dest, rank, COPY_ALLOC_COMP);
8376 }
8377
8378
8379 /* Recursively traverse an object of derived type, generating code to
8380 copy only its allocatable components. */
8381
8382 tree
8383 gfc_copy_only_alloc_comp (gfc_symbol * der_type, tree decl, tree dest, int rank)
8384 {
8385 return structure_alloc_comps (der_type, decl, dest, rank, COPY_ONLY_ALLOC_COMP);
8386 }
8387
8388
8389 /* Returns the value of LBOUND for an expression. This could be broken out
8390 from gfc_conv_intrinsic_bound but this seemed to be simpler. This is
8391 called by gfc_alloc_allocatable_for_assignment. */
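/* Roughly (an added summary of the cases handled below): for an array
   descriptor the stored lower bound is used unless the dimension turns out
   to be empty, in which case 1 is used; for a whole-array variable the
   lower bound recorded in its type is used; for any other expression
   LBOUND is simply 1.  */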
8392 static tree
8393 get_std_lbound (gfc_expr *expr, tree desc, int dim, bool assumed_size)
8394 {
8395 tree lbound;
8396 tree ubound;
8397 tree stride;
8398 tree cond, cond1, cond3, cond4;
8399 tree tmp;
8400 gfc_ref *ref;
8401
8402 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)))
8403 {
8404 tmp = gfc_rank_cst[dim];
8405 lbound = gfc_conv_descriptor_lbound_get (desc, tmp);
8406 ubound = gfc_conv_descriptor_ubound_get (desc, tmp);
8407 stride = gfc_conv_descriptor_stride_get (desc, tmp);
8408 cond1 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
8409 ubound, lbound);
8410 cond3 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
8411 stride, gfc_index_zero_node);
8412 cond3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
8413 boolean_type_node, cond3, cond1);
8414 cond4 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
8415 stride, gfc_index_zero_node);
8416 if (assumed_size)
8417 cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
8418 tmp, build_int_cst (gfc_array_index_type,
8419 expr->rank - 1));
8420 else
8421 cond = boolean_false_node;
8422
8423 cond1 = fold_build2_loc (input_location, TRUTH_OR_EXPR,
8424 boolean_type_node, cond3, cond4);
8425 cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
8426 boolean_type_node, cond, cond1);
8427
8428 return fold_build3_loc (input_location, COND_EXPR,
8429 gfc_array_index_type, cond,
8430 lbound, gfc_index_one_node);
8431 }
8432
8433 if (expr->expr_type == EXPR_FUNCTION)
8434 {
8435 /* A conversion function, so use the argument. */
8436 gcc_assert (expr->value.function.isym
8437 && expr->value.function.isym->conversion);
8438 expr = expr->value.function.actual->expr;
8439 }
8440
8441 if (expr->expr_type == EXPR_VARIABLE)
8442 {
8443 tmp = TREE_TYPE (expr->symtree->n.sym->backend_decl);
8444 for (ref = expr->ref; ref; ref = ref->next)
8445 {
8446 if (ref->type == REF_COMPONENT
8447 && ref->u.c.component->as
8448 && ref->next
8449 && ref->next->u.ar.type == AR_FULL)
8450 tmp = TREE_TYPE (ref->u.c.component->backend_decl);
8451 }
8452 return GFC_TYPE_ARRAY_LBOUND(tmp, dim);
8453 }
8454
8455 return gfc_index_one_node;
8456 }
8457
8458
8459 /* Returns true if an expression represents an lhs that can be reallocated
8460 on assignment. */
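/* Illustrative examples (added, not from the original sources):

       real, allocatable :: a(:), b(:)
       type(t) :: d                ! T has an allocatable component C(:)

       a      = b        ! yes: full reference to an allocatable array
       d%c    = b        ! yes: full reference to an allocatable component
       a(1:5) = b        ! no:  an array section is never reallocated     */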
8461
8462 bool
8463 gfc_is_reallocatable_lhs (gfc_expr *expr)
8464 {
8465 gfc_ref * ref;
8466
8467 if (!expr->ref)
8468 return false;
8469
8470 /* An allocatable variable. */
8471 if (expr->symtree->n.sym->attr.allocatable
8472 && expr->ref
8473 && expr->ref->type == REF_ARRAY
8474 && expr->ref->u.ar.type == AR_FULL)
8475 return true;
8476
8477 /* All that can be left are allocatable components. */
8478 if ((expr->symtree->n.sym->ts.type != BT_DERIVED
8479 && expr->symtree->n.sym->ts.type != BT_CLASS)
8480 || !expr->symtree->n.sym->ts.u.derived->attr.alloc_comp)
8481 return false;
8482
8483 /* Find a component ref followed by an array reference. */
8484 for (ref = expr->ref; ref; ref = ref->next)
8485 if (ref->next
8486 && ref->type == REF_COMPONENT
8487 && ref->next->type == REF_ARRAY
8488 && !ref->next->next)
8489 break;
8490
8491 if (!ref)
8492 return false;
8493
8494 /* Return true if valid reallocatable lhs. */
8495 if (ref->u.c.component->attr.allocatable
8496 && ref->next->u.ar.type == AR_FULL)
8497 return true;
8498
8499 return false;
8500 }
8501
8502
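/* Determine the string length of the concatenation EXPR by adding the
   lengths of its two operands, recursing into nested concatenations.
   (Descriptive comment added here; a best-effort summary of the code
   below.)  */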
8503 static tree
8504 concat_str_length (gfc_expr* expr)
8505 {
8506 tree type;
8507 tree len1;
8508 tree len2;
8509 gfc_se se;
8510
8511 type = gfc_typenode_for_spec (&expr->value.op.op1->ts);
8512 len1 = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
8513 if (len1 == NULL_TREE)
8514 {
8515 if (expr->value.op.op1->expr_type == EXPR_OP)
8516 len1 = concat_str_length (expr->value.op.op1);
8517 else if (expr->value.op.op1->expr_type == EXPR_CONSTANT)
8518 len1 = build_int_cst (gfc_charlen_type_node,
8519 expr->value.op.op1->value.character.length);
8520 else if (expr->value.op.op1->ts.u.cl->length)
8521 {
8522 gfc_init_se (&se, NULL);
8523 gfc_conv_expr (&se, expr->value.op.op1->ts.u.cl->length);
8524 len1 = se.expr;
8525 }
8526 else
8527 {
8528 /* Last resort! */
8529 gfc_init_se (&se, NULL);
8530 se.want_pointer = 1;
8531 se.descriptor_only = 1;
8532 gfc_conv_expr (&se, expr->value.op.op1);
8533 len1 = se.string_length;
8534 }
8535 }
8536
8537 type = gfc_typenode_for_spec (&expr->value.op.op2->ts);
8538 len2 = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
8539 if (len2 == NULL_TREE)
8540 {
8541 if (expr->value.op.op2->expr_type == EXPR_OP)
8542 len2 = concat_str_length (expr->value.op.op2);
8543 else if (expr->value.op.op2->expr_type == EXPR_CONSTANT)
8544 len2 = build_int_cst (gfc_charlen_type_node,
8545 expr->value.op.op2->value.character.length);
8546 else if (expr->value.op.op2->ts.u.cl->length)
8547 {
8548 gfc_init_se (&se, NULL);
8549 gfc_conv_expr (&se, expr->value.op.op2->ts.u.cl->length);
8550 len2 = se.expr;
8551 }
8552 else
8553 {
8554 /* Last resort! */
8555 gfc_init_se (&se, NULL);
8556 se.want_pointer = 1;
8557 se.descriptor_only = 1;
8558 gfc_conv_expr (&se, expr->value.op.op2);
8559 len2 = se.string_length;
8560 }
8561 }
8562
8563 gcc_assert (len1 && len2);
8564 len1 = fold_convert (gfc_charlen_type_node, len1);
8565 len2 = fold_convert (gfc_charlen_type_node, len2);
8566
8567 return fold_build2_loc (input_location, PLUS_EXPR,
8568 gfc_charlen_type_node, len1, len2);
8569 }
8570
8571
8572 /* Allocate the lhs of an assignment to an allocatable array if it is
8573 currently unallocated; otherwise reallocate it. */
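/* An illustration (added here) of the (re)allocation on assignment this
   implements (F2003 7.4.1.3):

       real, allocatable :: a(:)
       a = [1.0, 2.0, 3.0]   ! 'a' is allocated, or reallocated when its
                             ! shape or deferred length parameter differs,
                             ! with the shape of the rhs.                  */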
8574
8575 tree
8576 gfc_alloc_allocatable_for_assignment (gfc_loopinfo *loop,
8577 gfc_expr *expr1,
8578 gfc_expr *expr2)
8579 {
8580 stmtblock_t realloc_block;
8581 stmtblock_t alloc_block;
8582 stmtblock_t fblock;
8583 gfc_ss *rss;
8584 gfc_ss *lss;
8585 gfc_array_info *linfo;
8586 tree realloc_expr;
8587 tree alloc_expr;
8588 tree size1;
8589 tree size2;
8590 tree array1;
8591 tree cond_null;
8592 tree cond;
8593 tree tmp;
8594 tree tmp2;
8595 tree lbound;
8596 tree ubound;
8597 tree desc;
8598 tree old_desc;
8599 tree desc2;
8600 tree offset;
8601 tree jump_label1;
8602 tree jump_label2;
8603 tree neq_size;
8604 tree lbd;
8605 int n;
8606 int dim;
8607 gfc_array_spec * as;
8608
8609 /* x = f(...) with x allocatable. In this case, expr1 is the rhs.
8610 Find the lhs expression in the loop chain and set expr1 and
8611 expr2 accordingly. */
8612 if (expr1->expr_type == EXPR_FUNCTION && expr2 == NULL)
8613 {
8614 expr2 = expr1;
8615 /* Find the ss for the lhs. */
8616 lss = loop->ss;
8617 for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
8618 if (lss->info->expr && lss->info->expr->expr_type == EXPR_VARIABLE)
8619 break;
8620 if (lss == gfc_ss_terminator)
8621 return NULL_TREE;
8622 expr1 = lss->info->expr;
8623 }
8624
8625 /* Bail out if this is not a valid allocate on assignment. */
8626 if (!gfc_is_reallocatable_lhs (expr1)
8627 || (expr2 && !expr2->rank))
8628 return NULL_TREE;
8629
8630 /* Find the ss for the lhs. */
8631 lss = loop->ss;
8632 for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
8633 if (lss->info->expr == expr1)
8634 break;
8635
8636 if (lss == gfc_ss_terminator)
8637 return NULL_TREE;
8638
8639 linfo = &lss->info->data.array;
8640
8641 /* Find an ss for the rhs. For operator expressions, we see the
8642 ss's for the operands. Any one of these will do. */
8643 rss = loop->ss;
8644 for (; rss && rss != gfc_ss_terminator; rss = rss->loop_chain)
8645 if (rss->info->expr != expr1 && rss != loop->temp_ss)
8646 break;
8647
8648 if (expr2 && rss == gfc_ss_terminator)
8649 return NULL_TREE;
8650
8651 gfc_start_block (&fblock);
8652
8653 /* Since the lhs is allocatable, this must be a descriptor type.
8654 Get the data and array size. */
8655 desc = linfo->descriptor;
8656 gcc_assert (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)));
8657 array1 = gfc_conv_descriptor_data_get (desc);
8658
8659 /* 7.4.1.3 "If variable is an allocated allocatable variable, it is
8660 deallocated if expr is an array of different shape or any of the
8661 corresponding length type parameter values of variable and expr
8662 differ." This assures F95 compatibility. */
8663 jump_label1 = gfc_build_label_decl (NULL_TREE);
8664 jump_label2 = gfc_build_label_decl (NULL_TREE);
8665
8666 /* Allocate if data is NULL. */
8667 cond_null = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
8668 array1, build_int_cst (TREE_TYPE (array1), 0));
8669
8670 if (expr1->ts.deferred)
8671 cond_null = gfc_evaluate_now (boolean_true_node, &fblock);
8672 else
8673 cond_null = gfc_evaluate_now (cond_null, &fblock);
8674
8675 tmp = build3_v (COND_EXPR, cond_null,
8676 build1_v (GOTO_EXPR, jump_label1),
8677 build_empty_stmt (input_location));
8678 gfc_add_expr_to_block (&fblock, tmp);
8679
8680 /* Get arrayspec if expr is a full array. */
8681 if (expr2 && expr2->expr_type == EXPR_FUNCTION
8682 && expr2->value.function.isym
8683 && expr2->value.function.isym->conversion)
8684 {
8685 /* For conversion functions, take the arg. */
8686 gfc_expr *arg = expr2->value.function.actual->expr;
8687 as = gfc_get_full_arrayspec_from_expr (arg);
8688 }
8689 else if (expr2)
8690 as = gfc_get_full_arrayspec_from_expr (expr2);
8691 else
8692 as = NULL;
8693
8694 /* If the lhs shape is not the same as the rhs, jump to setting the
8695 bounds and doing the reallocation... */
8696 for (n = 0; n < expr1->rank; n++)
8697 {
8698 /* Check the shape. */
8699 lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
8700 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[n]);
8701 tmp = fold_build2_loc (input_location, MINUS_EXPR,
8702 gfc_array_index_type,
8703 loop->to[n], loop->from[n]);
8704 tmp = fold_build2_loc (input_location, PLUS_EXPR,
8705 gfc_array_index_type,
8706 tmp, lbound);
8707 tmp = fold_build2_loc (input_location, MINUS_EXPR,
8708 gfc_array_index_type,
8709 tmp, ubound);
8710 cond = fold_build2_loc (input_location, NE_EXPR,
8711 boolean_type_node,
8712 tmp, gfc_index_zero_node);
8713 tmp = build3_v (COND_EXPR, cond,
8714 build1_v (GOTO_EXPR, jump_label1),
8715 build_empty_stmt (input_location));
8716 gfc_add_expr_to_block (&fblock, tmp);
8717 }
8718
8719 /* ...else jump past the (re)alloc code. */
8720 tmp = build1_v (GOTO_EXPR, jump_label2);
8721 gfc_add_expr_to_block (&fblock, tmp);
8722
8723 /* Add the label to start automatic (re)allocation. */
8724 tmp = build1_v (LABEL_EXPR, jump_label1);
8725 gfc_add_expr_to_block (&fblock, tmp);
8726
8727 /* If the lhs has not been allocated, its bounds will not have been
8728 initialized and so its size is set to zero. */
8729 size1 = gfc_create_var (gfc_array_index_type, NULL);
8730 gfc_init_block (&alloc_block);
8731 gfc_add_modify (&alloc_block, size1, gfc_index_zero_node);
8732 gfc_init_block (&realloc_block);
8733 gfc_add_modify (&realloc_block, size1,
8734 gfc_conv_descriptor_size (desc, expr1->rank));
8735 tmp = build3_v (COND_EXPR, cond_null,
8736 gfc_finish_block (&alloc_block),
8737 gfc_finish_block (&realloc_block));
8738 gfc_add_expr_to_block (&fblock, tmp);
8739
8740 /* Get the rhs size and fix it. */
8741 if (expr2)
8742 desc2 = rss->info->data.array.descriptor;
8743 else
8744 desc2 = NULL_TREE;
8745
8746 size2 = gfc_index_one_node;
8747 for (n = 0; n < expr2->rank; n++)
8748 {
8749 tmp = fold_build2_loc (input_location, MINUS_EXPR,
8750 gfc_array_index_type,
8751 loop->to[n], loop->from[n]);
8752 tmp = fold_build2_loc (input_location, PLUS_EXPR,
8753 gfc_array_index_type,
8754 tmp, gfc_index_one_node);
8755 size2 = fold_build2_loc (input_location, MULT_EXPR,
8756 gfc_array_index_type,
8757 tmp, size2);
8758 }
8759 size2 = gfc_evaluate_now (size2, &fblock);
8760
8761 cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
8762 size1, size2);
8763
8764 /* If the lhs is deferred length, assume that the element size
8765 changes and force a reallocation. */
8766 if (expr1->ts.deferred)
8767 neq_size = gfc_evaluate_now (boolean_true_node, &fblock);
8768 else
8769 neq_size = gfc_evaluate_now (cond, &fblock);
8770
8771 /* Deallocation of allocatable components will have to occur on
8772 reallocation. Fix the old descriptor now. */
8773 if ((expr1->ts.type == BT_DERIVED)
8774 && expr1->ts.u.derived->attr.alloc_comp)
8775 old_desc = gfc_evaluate_now (desc, &fblock);
8776 else
8777 old_desc = NULL_TREE;
8778
8779 /* Now modify the lhs descriptor and the associated scalarizer
8780 variables. F2003 7.4.1.3: "If variable is or becomes an
8781 unallocated allocatable variable, then it is allocated with each
8782 deferred type parameter equal to the corresponding type parameters
8783 of expr, with the shape of expr, and with each lower bound equal
8784 to the corresponding element of LBOUND(expr)."
8785 Reuse size1 to keep a dimension-by-dimension track of the
8786 stride of the new array. */
8787 size1 = gfc_index_one_node;
8788 offset = gfc_index_zero_node;
8789
8790 for (n = 0; n < expr2->rank; n++)
8791 {
8792 tmp = fold_build2_loc (input_location, MINUS_EXPR,
8793 gfc_array_index_type,
8794 loop->to[n], loop->from[n]);
8795 tmp = fold_build2_loc (input_location, PLUS_EXPR,
8796 gfc_array_index_type,
8797 tmp, gfc_index_one_node);
8798
8799 lbound = gfc_index_one_node;
8800 ubound = tmp;
8801
8802 if (as)
8803 {
8804 lbd = get_std_lbound (expr2, desc2, n,
8805 as->type == AS_ASSUMED_SIZE);
8806 ubound = fold_build2_loc (input_location,
8807 MINUS_EXPR,
8808 gfc_array_index_type,
8809 ubound, lbound);
8810 ubound = fold_build2_loc (input_location,
8811 PLUS_EXPR,
8812 gfc_array_index_type,
8813 ubound, lbd);
8814 lbound = lbd;
8815 }
8816
8817 gfc_conv_descriptor_lbound_set (&fblock, desc,
8818 gfc_rank_cst[n],
8819 lbound);
8820 gfc_conv_descriptor_ubound_set (&fblock, desc,
8821 gfc_rank_cst[n],
8822 ubound);
8823 gfc_conv_descriptor_stride_set (&fblock, desc,
8824 gfc_rank_cst[n],
8825 size1);
8826 lbound = gfc_conv_descriptor_lbound_get (desc,
8827 gfc_rank_cst[n]);
8828 tmp2 = fold_build2_loc (input_location, MULT_EXPR,
8829 gfc_array_index_type,
8830 lbound, size1);
8831 offset = fold_build2_loc (input_location, MINUS_EXPR,
8832 gfc_array_index_type,
8833 offset, tmp2);
8834 size1 = fold_build2_loc (input_location, MULT_EXPR,
8835 gfc_array_index_type,
8836 tmp, size1);
8837 }
8838
8839 /* Set the lhs descriptor and scalarizer offsets. For rank > 1,
8840 the array offset is saved and the info.offset is used for a
8841 running offset. Use the saved_offset instead. */
8842 tmp = gfc_conv_descriptor_offset (desc);
8843 gfc_add_modify (&fblock, tmp, offset);
8844 if (linfo->saved_offset
8845 && TREE_CODE (linfo->saved_offset) == VAR_DECL)
8846 gfc_add_modify (&fblock, linfo->saved_offset, tmp);
8847
8848 /* Now set the deltas for the lhs. */
8849 for (n = 0; n < expr1->rank; n++)
8850 {
8851 tmp = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
8852 dim = lss->dim[n];
8853 tmp = fold_build2_loc (input_location, MINUS_EXPR,
8854 gfc_array_index_type, tmp,
8855 loop->from[dim]);
8856 if (linfo->delta[dim]
8857 && TREE_CODE (linfo->delta[dim]) == VAR_DECL)
8858 gfc_add_modify (&fblock, linfo->delta[dim], tmp);
8859 }
8860
8861 /* Get the new lhs size in bytes. */
8862 if (expr1->ts.type == BT_CHARACTER && expr1->ts.deferred)
8863 {
8864 if (expr2->ts.deferred)
8865 {
8866 if (TREE_CODE (expr2->ts.u.cl->backend_decl) == VAR_DECL)
8867 tmp = expr2->ts.u.cl->backend_decl;
8868 else
8869 tmp = rss->info->string_length;
8870 }
8871 else
8872 {
8873 tmp = expr2->ts.u.cl->backend_decl;
8874 if (!tmp && expr2->expr_type == EXPR_OP
8875 && expr2->value.op.op == INTRINSIC_CONCAT)
8876 {
8877 tmp = concat_str_length (expr2);
8878 expr2->ts.u.cl->backend_decl = gfc_evaluate_now (tmp, &fblock);
8879 }
8880 tmp = fold_convert (TREE_TYPE (expr1->ts.u.cl->backend_decl), tmp);
8881 }
8882
8883 if (expr1->ts.u.cl->backend_decl
8884 && TREE_CODE (expr1->ts.u.cl->backend_decl) == VAR_DECL)
8885 gfc_add_modify (&fblock, expr1->ts.u.cl->backend_decl, tmp);
8886 else
8887 gfc_add_modify (&fblock, lss->info->string_length, tmp);
8888 }
8889 else if (expr1->ts.type == BT_CHARACTER && expr1->ts.u.cl->backend_decl)
8890 {
8891 tmp = TYPE_SIZE_UNIT (TREE_TYPE (gfc_typenode_for_spec (&expr1->ts)));
8892 tmp = fold_build2_loc (input_location, MULT_EXPR,
8893 gfc_array_index_type, tmp,
8894 expr1->ts.u.cl->backend_decl);
8895 }
8896 else
8897 tmp = TYPE_SIZE_UNIT (gfc_typenode_for_spec (&expr1->ts));
8898 tmp = fold_convert (gfc_array_index_type, tmp);
8899 size2 = fold_build2_loc (input_location, MULT_EXPR,
8900 gfc_array_index_type,
8901 tmp, size2);
8902 size2 = fold_convert (size_type_node, size2);
8903 size2 = fold_build2_loc (input_location, MAX_EXPR, size_type_node,
8904 size2, size_one_node);
8905 size2 = gfc_evaluate_now (size2, &fblock);
8906
8907 /* For deferred character length, the 'size' field of the dtype might
8908 have changed, so set the dtype. */
8909 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc))
8910 && expr1->ts.type == BT_CHARACTER && expr1->ts.deferred)
8911 {
8912 tree type;
8913 tmp = gfc_conv_descriptor_dtype (desc);
8914 if (expr2->ts.u.cl->backend_decl)
8915 type = gfc_typenode_for_spec (&expr2->ts);
8916 else
8917 type = gfc_typenode_for_spec (&expr1->ts);
8918
8919 gfc_add_modify (&fblock, tmp,
8920 gfc_get_dtype_rank_type (expr1->rank,type));
8921 }
8922
8923 /* Realloc expression. Note that the scalarizer uses desc.data
8924 in the array reference - (*desc.data)[<element>]. */
8925 gfc_init_block (&realloc_block);
8926
8927 if ((expr1->ts.type == BT_DERIVED)
8928 && expr1->ts.u.derived->attr.alloc_comp)
8929 {
8930 tmp = gfc_deallocate_alloc_comp_no_caf (expr1->ts.u.derived, old_desc,
8931 expr1->rank);
8932 gfc_add_expr_to_block (&realloc_block, tmp);
8933 }
8934
8935 tmp = build_call_expr_loc (input_location,
8936 builtin_decl_explicit (BUILT_IN_REALLOC), 2,
8937 fold_convert (pvoid_type_node, array1),
8938 size2);
8939 gfc_conv_descriptor_data_set (&realloc_block,
8940 desc, tmp);
8941
8942 if ((expr1->ts.type == BT_DERIVED)
8943 && expr1->ts.u.derived->attr.alloc_comp)
8944 {
8945 tmp = gfc_nullify_alloc_comp (expr1->ts.u.derived, desc,
8946 expr1->rank);
8947 gfc_add_expr_to_block (&realloc_block, tmp);
8948 }
8949
8950 realloc_expr = gfc_finish_block (&realloc_block);
8951
8952 /* Only reallocate if sizes are different. */
8953 tmp = build3_v (COND_EXPR, neq_size, realloc_expr,
8954 build_empty_stmt (input_location));
8955 realloc_expr = tmp;
8956
8957
8958 /* Malloc expression. */
8959 gfc_init_block (&alloc_block);
8960 tmp = build_call_expr_loc (input_location,
8961 builtin_decl_explicit (BUILT_IN_MALLOC),
8962 1, size2);
8963 gfc_conv_descriptor_data_set (&alloc_block,
8964 desc, tmp);
8965
8966 /* We already set the dtype in the case of deferred character
8967 length arrays. */
8968 if (!(GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc))
8969 && expr1->ts.type == BT_CHARACTER && expr1->ts.deferred))
8970 {
8971 tmp = gfc_conv_descriptor_dtype (desc);
8972 gfc_add_modify (&alloc_block, tmp, gfc_get_dtype (TREE_TYPE (desc)));
8973 }
8974
8975 if ((expr1->ts.type == BT_DERIVED)
8976 && expr1->ts.u.derived->attr.alloc_comp)
8977 {
8978 tmp = gfc_nullify_alloc_comp (expr1->ts.u.derived, desc,
8979 expr1->rank);
8980 gfc_add_expr_to_block (&alloc_block, tmp);
8981 }
8982 alloc_expr = gfc_finish_block (&alloc_block);
8983
8984 /* Malloc if not allocated; realloc otherwise. */
8985 tmp = build_int_cst (TREE_TYPE (array1), 0);
8986 cond = fold_build2_loc (input_location, EQ_EXPR,
8987 boolean_type_node,
8988 array1, tmp);
8989 tmp = build3_v (COND_EXPR, cond, alloc_expr, realloc_expr);
8990 gfc_add_expr_to_block (&fblock, tmp);
8991
8992 /* Make sure that the scalarizer data pointer is updated. */
8993 if (linfo->data
8994 && TREE_CODE (linfo->data) == VAR_DECL)
8995 {
8996 tmp = gfc_conv_descriptor_data_get (desc);
8997 gfc_add_modify (&fblock, linfo->data, tmp);
8998 }
8999
9000 /* Add the exit label. */
9001 tmp = build1_v (LABEL_EXPR, jump_label2);
9002 gfc_add_expr_to_block (&fblock, tmp);
9003
9004 return gfc_finish_block (&fblock);
9005 }
9006
9007
9008 /* NULLIFY an allocatable/pointer array on function entry, free it on exit.
9009 Do likewise, recursively if necessary, with the allocatable components of
9010 derived types. */
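/* For instance (an added illustration):

       subroutine sub
         real, allocatable :: a(:)   ! a's data pointer is nullified in the
         call work (a)               ! init block generated below ...
       end subroutine sub            ! ... and freed in the cleanup block.  */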
9011
9012 void
9013 gfc_trans_deferred_array (gfc_symbol * sym, gfc_wrapped_block * block)
9014 {
9015 tree type;
9016 tree tmp;
9017 tree descriptor;
9018 stmtblock_t init;
9019 stmtblock_t cleanup;
9020 locus loc;
9021 int rank;
9022 bool sym_has_alloc_comp, has_finalizer;
9023
9024 sym_has_alloc_comp = (sym->ts.type == BT_DERIVED
9025 || sym->ts.type == BT_CLASS)
9026 && sym->ts.u.derived->attr.alloc_comp;
9027 has_finalizer = sym->ts.type == BT_CLASS || sym->ts.type == BT_DERIVED
9028 ? gfc_is_finalizable (sym->ts.u.derived, NULL) : false;
9029
9030 /* Make sure the frontend gets these right. */
9031 gcc_assert (sym->attr.pointer || sym->attr.allocatable || sym_has_alloc_comp
9032 || has_finalizer);
9033
9034 gfc_save_backend_locus (&loc);
9035 gfc_set_backend_locus (&sym->declared_at);
9036 gfc_init_block (&init);
9037
9038 gcc_assert (TREE_CODE (sym->backend_decl) == VAR_DECL
9039 || TREE_CODE (sym->backend_decl) == PARM_DECL);
9040
9041 if (sym->ts.type == BT_CHARACTER
9042 && !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
9043 {
9044 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
9045 gfc_trans_vla_type_sizes (sym, &init);
9046 }
9047
9048 /* Dummy, use-associated and result variables don't need anything special. */
9049 if (sym->attr.dummy || sym->attr.use_assoc || sym->attr.result)
9050 {
9051 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
9052 gfc_restore_backend_locus (&loc);
9053 return;
9054 }
9055
9056 descriptor = sym->backend_decl;
9057
9058 /* Although static, derived types with default initializers and
9059 allocatable components must not be nulled wholesale; instead they
9060 are treated component by component. */
9061 if (TREE_STATIC (descriptor) && !sym_has_alloc_comp && !has_finalizer)
9062 {
9063 /* SAVEd variables are not freed on exit. */
9064 gfc_trans_static_array_pointer (sym);
9065
9066 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
9067 gfc_restore_backend_locus (&loc);
9068 return;
9069 }
9070
9071 /* Get the descriptor type. */
9072 type = TREE_TYPE (sym->backend_decl);
9073
9074 if ((sym_has_alloc_comp || (has_finalizer && sym->ts.type != BT_CLASS))
9075 && !(sym->attr.pointer || sym->attr.allocatable))
9076 {
9077 if (!sym->attr.save
9078 && !(TREE_STATIC (sym->backend_decl) && sym->attr.is_main_program))
9079 {
9080 if (sym->value == NULL
9081 || !gfc_has_default_initializer (sym->ts.u.derived))
9082 {
9083 rank = sym->as ? sym->as->rank : 0;
9084 tmp = gfc_nullify_alloc_comp (sym->ts.u.derived,
9085 descriptor, rank);
9086 gfc_add_expr_to_block (&init, tmp);
9087 }
9088 else
9089 gfc_init_default_dt (sym, &init, false);
9090 }
9091 }
9092 else if (!GFC_DESCRIPTOR_TYPE_P (type))
9093 {
9094 /* If the backend_decl is not a descriptor, we must have a pointer
9095 to one. */
9096 descriptor = build_fold_indirect_ref_loc (input_location,
9097 sym->backend_decl);
9098 type = TREE_TYPE (descriptor);
9099 }
9100
9101 /* NULLIFY the data pointer, for non-saved allocatables. */
9102 if (GFC_DESCRIPTOR_TYPE_P (type) && !sym->attr.save && sym->attr.allocatable)
9103 gfc_conv_descriptor_data_set (&init, descriptor, null_pointer_node);
9104
9105 gfc_restore_backend_locus (&loc);
9106 gfc_init_block (&cleanup);
9107
9108 /* Allocatable arrays need to be freed when they go out of scope.
9109 The allocatable components of pointers must not be touched. */
9110 if (!sym->attr.allocatable && has_finalizer && sym->ts.type != BT_CLASS
9111 && !sym->attr.pointer && !sym->attr.artificial && !sym->attr.save
9112 && !sym->ns->proc_name->attr.is_main_program)
9113 {
9114 gfc_expr *e;
9115 sym->attr.referenced = 1;
9116 e = gfc_lval_expr_from_sym (sym);
9117 gfc_add_finalizer_call (&cleanup, e);
9118 gfc_free_expr (e);
9119 }
9120 else if ((!sym->attr.allocatable || !has_finalizer)
9121 && sym_has_alloc_comp && !(sym->attr.function || sym->attr.result)
9122 && !sym->attr.pointer && !sym->attr.save
9123 && !sym->ns->proc_name->attr.is_main_program)
9124 {
9125 int rank;
9126 rank = sym->as ? sym->as->rank : 0;
9127 tmp = gfc_deallocate_alloc_comp (sym->ts.u.derived, descriptor, rank);
9128 gfc_add_expr_to_block (&cleanup, tmp);
9129 }
9130
9131 if (sym->attr.allocatable && (sym->attr.dimension || sym->attr.codimension)
9132 && !sym->attr.save && !sym->attr.result
9133 && !sym->ns->proc_name->attr.is_main_program)
9134 {
9135 gfc_expr *e;
9136 e = has_finalizer ? gfc_lval_expr_from_sym (sym) : NULL;
9137 tmp = gfc_trans_dealloc_allocated (sym->backend_decl,
9138 sym->attr.codimension, e);
9139 if (e)
9140 gfc_free_expr (e);
9141 gfc_add_expr_to_block (&cleanup, tmp);
9142 }
9143
9144 gfc_add_init_cleanup (block, gfc_finish_block (&init),
9145 gfc_finish_block (&cleanup));
9146 }
9147
9148 /************ Expression Walking Functions ******************/
9149
9150 /* Walk a variable reference.
9151
9152 Possible extension - multiple component subscripts.
9153 x(:,:) = foo%a(:)%b(:)
9154 Transforms to
9155 forall (i=..., j=...)
9156 x(i,j) = foo%a(j)%b(i)
9157 end forall
9158 This adds a fair amount of complexity because you need to deal with more
9159 than one ref. Maybe handle in a similar manner to vector subscripts.
9160 Maybe not worth the effort. */
9161
9162
9163 static gfc_ss *
9164 gfc_walk_variable_expr (gfc_ss * ss, gfc_expr * expr)
9165 {
9166 gfc_ref *ref;
9167
9168 for (ref = expr->ref; ref; ref = ref->next)
9169 if (ref->type == REF_ARRAY && ref->u.ar.type != AR_ELEMENT)
9170 break;
9171
9172 return gfc_walk_array_ref (ss, expr, ref);
9173 }
9174
9175
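/* Walk the array-reference chain of EXPR starting at REF.  Substring bounds
   and elemental subscripts become scalar SS terms, full-array and section
   references become array SS terms (vector subscripts getting their own
   descriptor SS), and the extended chain is returned.  */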
9176 gfc_ss *
9177 gfc_walk_array_ref (gfc_ss * ss, gfc_expr * expr, gfc_ref * ref)
9178 {
9179 gfc_array_ref *ar;
9180 gfc_ss *newss;
9181 int n;
9182
9183 for (; ref; ref = ref->next)
9184 {
9185 if (ref->type == REF_SUBSTRING)
9186 {
9187 ss = gfc_get_scalar_ss (ss, ref->u.ss.start);
9188 ss = gfc_get_scalar_ss (ss, ref->u.ss.end);
9189 }
9190
9191 /* We're only interested in array sections from now on. */
9192 if (ref->type != REF_ARRAY)
9193 continue;
9194
9195 ar = &ref->u.ar;
9196
9197 switch (ar->type)
9198 {
9199 case AR_ELEMENT:
9200 for (n = ar->dimen - 1; n >= 0; n--)
9201 ss = gfc_get_scalar_ss (ss, ar->start[n]);
9202 break;
9203
9204 case AR_FULL:
9205 newss = gfc_get_array_ss (ss, expr, ar->as->rank, GFC_SS_SECTION);
9206 newss->info->data.array.ref = ref;
9207
9208 /* Make sure a full array is treated the same as array(:,:), so that
9209 we don't need to special-case it all the time. */
9210 ar->dimen = ar->as->rank;
9211 for (n = 0; n < ar->dimen; n++)
9212 {
9213 ar->dimen_type[n] = DIMEN_RANGE;
9214
9215 gcc_assert (ar->start[n] == NULL);
9216 gcc_assert (ar->end[n] == NULL);
9217 gcc_assert (ar->stride[n] == NULL);
9218 }
9219 ss = newss;
9220 break;
9221
9222 case AR_SECTION:
9223 newss = gfc_get_array_ss (ss, expr, 0, GFC_SS_SECTION);
9224 newss->info->data.array.ref = ref;
9225
9226 /* We add SS chains for all the subscripts in the section. */
9227 for (n = 0; n < ar->dimen; n++)
9228 {
9229 gfc_ss *indexss;
9230
9231 switch (ar->dimen_type[n])
9232 {
9233 case DIMEN_ELEMENT:
9234 /* Add SS for elemental (scalar) subscripts. */
9235 gcc_assert (ar->start[n]);
9236 indexss = gfc_get_scalar_ss (gfc_ss_terminator, ar->start[n]);
9237 indexss->loop_chain = gfc_ss_terminator;
9238 newss->info->data.array.subscript[n] = indexss;
9239 break;
9240
9241 case DIMEN_RANGE:
9242 /* We don't add anything for sections, just remember this
9243 dimension for later. */
9244 newss->dim[newss->dimen] = n;
9245 newss->dimen++;
9246 break;
9247
9248 case DIMEN_VECTOR:
9249 /* Create a GFC_SS_VECTOR index in which we can store
9250 the vector's descriptor. */
9251 indexss = gfc_get_array_ss (gfc_ss_terminator, ar->start[n],
9252 1, GFC_SS_VECTOR);
9253 indexss->loop_chain = gfc_ss_terminator;
9254 newss->info->data.array.subscript[n] = indexss;
9255 newss->dim[newss->dimen] = n;
9256 newss->dimen++;
9257 break;
9258
9259 default:
9260 /* We should know what sort of subscript this is by now. */
9261 gcc_unreachable ();
9262 }
9263 }
9264 /* We should have at least one non-elemental dimension,
9265 unless we are creating a descriptor for a (scalar) coarray. */
9266 gcc_assert (newss->dimen > 0
9267 || newss->info->data.array.ref->u.ar.as->corank > 0);
9268 ss = newss;
9269 break;
9270
9271 default:
9272 /* We should know what sort of section it is by now. */
9273 gcc_unreachable ();
9274 }
9275
9276 }
9277 return ss;
9278 }
9279
9280
9281 /* Walk an expression operator. If only one operand of a binary expression is
9282 scalar, we must also add the scalar term to the SS chain. */
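/* For example, in "a + 1.0" with "a" an array, walking "a" yields an array
   SS while walking "1.0" adds nothing; a scalar SS for "1.0" is therefore
   appended here so the constant is precalculated outside the loop and
   substituted inside the scalarized body.  */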
9283
9284 static gfc_ss *
9285 gfc_walk_op_expr (gfc_ss * ss, gfc_expr * expr)
9286 {
9287 gfc_ss *head;
9288 gfc_ss *head2;
9289
9290 head = gfc_walk_subexpr (ss, expr->value.op.op1);
9291 if (expr->value.op.op2 == NULL)
9292 head2 = head;
9293 else
9294 head2 = gfc_walk_subexpr (head, expr->value.op.op2);
9295
9296 /* All operands are scalar. Pass back and let the caller deal with it. */
9297 if (head2 == ss)
9298 return head2;
9299
9300 /* All operands require scalarization. */
9301 if (head != ss && (expr->value.op.op2 == NULL || head2 != head))
9302 return head2;
9303
9304 /* One of the operands needs scalarization, the other is scalar.
9305 Create a gfc_ss for the scalar expression. */
9306 if (head == ss)
9307 {
9308 /* First operand is scalar. We build the chain in reverse order, so
9309 add the scalar SS after the second operand. */
9310 head = head2;
9311 while (head && head->next != ss)
9312 head = head->next;
9313 /* Check we haven't somehow broken the chain. */
9314 gcc_assert (head);
9315 head->next = gfc_get_scalar_ss (ss, expr->value.op.op1);
9316 }
9317 else /* head2 == head */
9318 {
9319 gcc_assert (head2 == head);
9320 /* Second operand is scalar. */
9321 head2 = gfc_get_scalar_ss (head2, expr->value.op.op2);
9322 }
9323
9324 return head2;
9325 }
9326
9327
9328 /* Reverse a SS chain. */
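/* The walk routines prepend each new term, so a walked chain comes out in
   reverse; gfc_walk_expr uses this to restore the order in which the terms
   were walked before the chain is handed to the scalarizer.  */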
9329
9330 gfc_ss *
9331 gfc_reverse_ss (gfc_ss * ss)
9332 {
9333 gfc_ss *next;
9334 gfc_ss *head;
9335
9336 gcc_assert (ss != NULL);
9337
9338 head = gfc_ss_terminator;
9339 while (ss != gfc_ss_terminator)
9340 {
9341 next = ss->next;
9342 /* Check we didn't somehow break the chain. */
9343 gcc_assert (next != NULL);
9344 ss->next = head;
9345 head = ss;
9346 ss = next;
9347 }
9348
9349 return head;
9350 }
9351
9352
9353 /* Given an expression referring to a procedure, return the symbol of its
9354 interface. We can't get the procedure symbol directly as we have to handle
9355 the case of (deferred) type-bound procedures. */
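/* For a plain reference the procedure symbol itself is returned; for a call
   through a procedure-pointer component, e.g. "call x%p (args)", the
   interface symbol of the component "p" is returned instead.  */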
9356
9357 gfc_symbol *
9358 gfc_get_proc_ifc_for_expr (gfc_expr *procedure_ref)
9359 {
9360 gfc_symbol *sym;
9361 gfc_ref *ref;
9362
9363 if (procedure_ref == NULL)
9364 return NULL;
9365
9366 /* Normal procedure case. */
9367 if (procedure_ref->expr_type == EXPR_FUNCTION
9368 && procedure_ref->value.function.esym)
9369 sym = procedure_ref->value.function.esym;
9370 else
9371 sym = procedure_ref->symtree->n.sym;
9372
9373 /* Typebound procedure case. */
9374 for (ref = procedure_ref->ref; ref; ref = ref->next)
9375 {
9376 if (ref->type == REF_COMPONENT
9377 && ref->u.c.component->attr.proc_pointer)
9378 sym = ref->u.c.component->ts.interface;
9379 else
9380 sym = NULL;
9381 }
9382
9383 return sym;
9384 }
9385
9386
9387 /* Walk the arguments of an elemental function.
9388 PROC_IFC is the symbol of the procedure's interface, used to check whether an argument is permitted to be absent. If
9389 it is NULL, we don't do the check and the argument is assumed to be present.
9390 */
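/* As an illustration, for an elemental function applied as in "y = ef (a, 2.0)"
   (EF being a hypothetical elemental function and "a" an array), walking the
   arguments yields an array SS for "a" and a scalar SS of the requested TYPE
   for "2.0".  If every argument turns out to be scalar, the chain is freed
   again and the original SS is passed back unchanged.  */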
9391
9392 gfc_ss *
9393 gfc_walk_elemental_function_args (gfc_ss * ss, gfc_actual_arglist *arg,
9394 gfc_symbol *proc_ifc, gfc_ss_type type)
9395 {
9396 gfc_formal_arglist *dummy_arg;
9397 int scalar;
9398 gfc_ss *head;
9399 gfc_ss *tail;
9400 gfc_ss *newss;
9401
9402 head = gfc_ss_terminator;
9403 tail = NULL;
9404
9405 if (proc_ifc)
9406 dummy_arg = gfc_sym_get_dummy_args (proc_ifc);
9407 else
9408 dummy_arg = NULL;
9409
9410 scalar = 1;
9411 for (; arg; arg = arg->next)
9412 {
9413 if (!arg->expr || arg->expr->expr_type == EXPR_NULL)
9414 goto loop_continue;
9415
9416 newss = gfc_walk_subexpr (head, arg->expr);
9417 if (newss == head)
9418 {
9419 /* Scalar argument. */
9420 gcc_assert (type == GFC_SS_SCALAR || type == GFC_SS_REFERENCE);
9421 newss = gfc_get_scalar_ss (head, arg->expr);
9422 newss->info->type = type;
9423 if (dummy_arg)
9424 newss->info->data.scalar.dummy_arg = dummy_arg->sym;
9425 }
9426 else
9427 scalar = 0;
9428
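/* An optional, allocatable or pointer actual passed to an optional dummy
   may effectively be absent at run time; note this so the scalarized call
   can cope with a null data reference.  */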
9429 if (dummy_arg != NULL
9430 && dummy_arg->sym->attr.optional
9431 && arg->expr->expr_type == EXPR_VARIABLE
9432 && (gfc_expr_attr (arg->expr).optional
9433 || gfc_expr_attr (arg->expr).allocatable
9434 || gfc_expr_attr (arg->expr).pointer))
9435 newss->info->can_be_null_ref = true;
9436
9437 head = newss;
9438 if (!tail)
9439 {
9440 tail = head;
9441 while (tail->next != gfc_ss_terminator)
9442 tail = tail->next;
9443 }
9444
9445 loop_continue:
9446 if (dummy_arg != NULL)
9447 dummy_arg = dummy_arg->next;
9448 }
9449
9450 if (scalar)
9451 {
9452 /* If all the arguments are scalar we don't need the argument SS. */
9453 gfc_free_ss_chain (head);
9454 /* Pass it back. */
9455 return ss;
9456 }
9457
9458 /* Add it onto the existing chain. */
9459 tail->next = ss;
9460 return head;
9461 }
9462
9463
9464 /* Walk a function call. Scalar functions are passed back, and taken out of
9465 scalarization loops. For elemental functions we walk their arguments.
9466 The result of functions returning arrays is stored in a temporary outside
9467 the loop, so that the function is only called once. Hence we do not need
9468 to walk their arguments. */
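/* Schematically, with AF an array-valued function, EF an elemental function
   and SF a scalar function (all hypothetical user procedures):
     y = af (a)     - one GFC_SS_FUNCTION term; the arguments are not walked,
                      since the result is evaluated once into a temporary;
     y = ef (a, s)  - the arguments are walked instead;
     x = sf (a)     - nothing is added; the call is evaluated outside the loop.  */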
9469
9470 static gfc_ss *
9471 gfc_walk_function_expr (gfc_ss * ss, gfc_expr * expr)
9472 {
9473 gfc_intrinsic_sym *isym;
9474 gfc_symbol *sym;
9475 gfc_component *comp = NULL;
9476
9477 isym = expr->value.function.isym;
9478
9479 /* Handle intrinsic functions separately. */
9480 if (isym)
9481 return gfc_walk_intrinsic_function (ss, expr, isym);
9482
9483 sym = expr->value.function.esym;
9484 if (!sym)
9485 sym = expr->symtree->n.sym;
9486
9487 if (gfc_is_alloc_class_array_function (expr))
9488 return gfc_get_array_ss (ss, expr,
9489 CLASS_DATA (expr->value.function.esym->result)->as->rank,
9490 GFC_SS_FUNCTION);
9491
9492 /* A function that returns arrays. */
9493 comp = gfc_get_proc_ptr_comp (expr);
9494 if ((!comp && gfc_return_by_reference (sym) && sym->result->attr.dimension)
9495 || (comp && comp->attr.dimension))
9496 return gfc_get_array_ss (ss, expr, expr->rank, GFC_SS_FUNCTION);
9497
9498 /* Walk the parameters of an elemental function. For now we always pass
9499 by reference. */
9500 if (sym->attr.elemental || (comp && comp->attr.elemental))
9501 {
9502 gfc_ss *old_ss = ss;
9503
9504 ss = gfc_walk_elemental_function_args (old_ss,
9505 expr->value.function.actual,
9506 gfc_get_proc_ifc_for_expr (expr),
9507 GFC_SS_REFERENCE);
9508 if (ss != old_ss
9509 && (comp
9510 || sym->attr.proc_pointer
9511 || sym->attr.if_source != IFSRC_DECL
9512 || sym->attr.array_outer_dependency))
9513 ss->info->array_outer_dependency = 1;
9514 }
9515
9516 /* Scalar functions are OK as these are evaluated outside the scalarization
9517 loop. Pass back and let the caller deal with it. */
9518 return ss;
9519 }
9520
9521
9522 /* An array temporary is constructed for array constructors. */
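/* For instance, the constructor in "a = [ 1, 2, 3 ]" or "a = [ (i*i, i = 1, n) ]"
   is walked as a single GFC_SS_CONSTRUCTOR term of rank expr->rank.  */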
9523
9524 static gfc_ss *
9525 gfc_walk_array_constructor (gfc_ss * ss, gfc_expr * expr)
9526 {
9527 return gfc_get_array_ss (ss, expr, expr->rank, GFC_SS_CONSTRUCTOR);
9528 }
9529
9530
9531 /* Walk an expression. Add walked expressions to the head of the SS chain.
9532 A wholly scalar expression will not be added. */
9533
9534 gfc_ss *
9535 gfc_walk_subexpr (gfc_ss * ss, gfc_expr * expr)
9536 {
9537 gfc_ss *head;
9538
9539 switch (expr->expr_type)
9540 {
9541 case EXPR_VARIABLE:
9542 head = gfc_walk_variable_expr (ss, expr);
9543 return head;
9544
9545 case EXPR_OP:
9546 head = gfc_walk_op_expr (ss, expr);
9547 return head;
9548
9549 case EXPR_FUNCTION:
9550 head = gfc_walk_function_expr (ss, expr);
9551 return head;
9552
9553 case EXPR_CONSTANT:
9554 case EXPR_NULL:
9555 case EXPR_STRUCTURE:
9556 /* Pass back and let the caller deal with it. */
9557 break;
9558
9559 case EXPR_ARRAY:
9560 head = gfc_walk_array_constructor (ss, expr);
9561 return head;
9562
9563 case EXPR_SUBSTRING:
9564 /* Pass back and let the caller deal with it. */
9565 break;
9566
9567 default:
9568 gfc_internal_error ("bad expression type during walk (%d)",
9569 expr->expr_type);
9570 }
9571 return ss;
9572 }
9573
9574
9575 /* Entry point for expression walking.
9576 A return value equal to the passed chain means this is
9577 a scalar expression. It is up to the caller to take whatever action is
9578 necessary to translate these. */
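/* Typical use (sketch only):

     ss = gfc_walk_expr (expr);
     if (ss == gfc_ss_terminator)
       ... wholly scalar expression, no scalarization loop needed ...
     else
       ... attach the chain to a scalarization loop and translate term by term ...
 */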
9579
9580 gfc_ss *
9581 gfc_walk_expr (gfc_expr * expr)
9582 {
9583 gfc_ss *res;
9584
9585 res = gfc_walk_subexpr (gfc_ss_terminator, expr);
9586 return gfc_reverse_ss (res);
9587 }