1 /* Array translation routines
2 Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
3 2011, 2012
4 Free Software Foundation, Inc.
5 Contributed by Paul Brook <paul@nowt.org>
6 and Steven Bosscher <s.bosscher@student.tudelft.nl>
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /* trans-array.c-- Various array related code, including scalarization,
25 allocation, initialization and other support routines. */
26
27 /* How the scalarizer works.
28 In gfortran, array expressions use the same core routines as scalar
29 expressions.
30 First, a Scalarization State (SS) chain is built. This is done by walking
31 the expression tree, and building a linear list of the terms in the
32 expression. As the tree is walked, scalar subexpressions are translated.
33
34 The scalarization parameters are stored in a gfc_loopinfo structure.
35 First the start and stride of each term is calculated by
36 gfc_conv_ss_startstride. During this process the expressions for the array
37 descriptors and data pointers are also translated.
38
39 If the expression is an assignment, we must then resolve any dependencies.
40 In Fortran all the RHS values of an assignment must be evaluated before
41 any assignments take place. This can require a temporary array to store the
42 values. We also require a temporary when we are passing array expressions
43 or vector subscripts as procedure parameters.
44
45 Array sections are passed without copying to a temporary. These use the
46 scalarizer to determine the shape of the section. The flag
47 loop->array_parameter tells the scalarizer that the actual values and loop
48 variables will not be required.
49
50 The function gfc_conv_loop_setup generates the scalarization setup code.
51 It determines the range of the scalarizing loop variables. If a temporary
52 is required, this is created and initialized. Code for scalar expressions
53 taken outside the loop is also generated at this time. Next the offset and
54 scaling required to translate from loop variables to array indices for each
55 term is calculated.
56
57 A call to gfc_start_scalarized_body marks the start of the scalarized
58 expression. This creates a scope and declares the loop variables. Before
59 calling this, gfc_mark_ss_chain_used must be used to indicate which terms
60 will be used inside this loop.
61
62 The scalar gfc_conv_* functions are then used to build the main body of the
63 scalarization loop. Scalarization loop variables and precalculated scalar
64 values are automatically substituted. Note that gfc_advance_se_ss_chain
65 must be used, rather than changing the se->ss directly.
66
67 For assignment expressions requiring a temporary, two sub loops are
68 generated. The first stores the result of the expression in the temporary,
69 the second copies it to the result. A call to
70 gfc_trans_scalarized_loop_boundary marks the end of the main loop code and
71 the start of the copying loop. The temporary may be less than full rank.
72
73 Finally gfc_trans_scalarizing_loops is called to generate the implicit do
74 loops. The loops are added to the pre chain of the loopinfo. The post
75 chain may still contain cleanup code.
76
77 After the loop code has been added into its parent scope gfc_cleanup_loop
78 is called to free all the SS allocated by the scalarizer. */
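/* As an informal illustration (a sketch, not lifted verbatim from any single
   caller), the steps described above typically come together roughly as
   follows; the exact calls and arguments vary with the expression being
   translated:

       ss = gfc_walk_expr (expr);                   build the SS chain
       gfc_init_loopinfo (&loop);
       gfc_add_ss_to_loop (&loop, ss);
       gfc_conv_ss_startstride (&loop);
       gfc_conv_loop_setup (&loop, &expr->where);
       gfc_mark_ss_chain_used (ss, 1);
       gfc_start_scalarized_body (&loop, &body);
       ... per-element code built with the scalar gfc_conv_* routines ...
       gfc_trans_scalarizing_loops (&loop, &body);
       gfc_cleanup_loop (&loop);

   Compare gfc_trans_array_constructor_subarray below, which follows this
   pattern almost literally. */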
79
80 #include "config.h"
81 #include "system.h"
82 #include "coretypes.h"
83 #include "tree.h"
84 #include "gimple.h"
85 #include "diagnostic-core.h" /* For internal_error/fatal_error. */
86 #include "flags.h"
87 #include "gfortran.h"
88 #include "constructor.h"
89 #include "trans.h"
90 #include "trans-stmt.h"
91 #include "trans-types.h"
92 #include "trans-array.h"
93 #include "trans-const.h"
94 #include "dependency.h"
95
96 static bool gfc_get_array_constructor_size (mpz_t *, gfc_constructor_base);
97
98 /* The contents of this structure aren't actually used, just the address. */
99 static gfc_ss gfc_ss_terminator_var;
100 gfc_ss * const gfc_ss_terminator = &gfc_ss_terminator_var;
101
102
103 static tree
104 gfc_array_dataptr_type (tree desc)
105 {
106 return (GFC_TYPE_ARRAY_DATAPTR_TYPE (TREE_TYPE (desc)));
107 }
108
109
110 /* Build expressions to access the members of an array descriptor.
111 It's surprisingly easy to mess up here, so never access
112 an array descriptor by "brute force", always use these
113 functions. This also avoids problems if we change the format
114 of an array descriptor.
115
116 To understand these magic numbers, look at the comments
117 before gfc_build_array_type() in trans-types.c.
118
119 The code within these defines should be the only code which knows the format
120 of an array descriptor.
121
122 Any code just needing to obtain the bounds of an array should use
123 gfc_conv_array_* rather than the following functions, as these will return
124 known constant values, and work with arrays which do not have descriptors.
125
126 Don't forget to #undef these! */
127
128 #define DATA_FIELD 0
129 #define OFFSET_FIELD 1
130 #define DTYPE_FIELD 2
131 #define DIMENSION_FIELD 3
132 #define CAF_TOKEN_FIELD 4
133
134 #define STRIDE_SUBFIELD 0
135 #define LBOUND_SUBFIELD 1
136 #define UBOUND_SUBFIELD 2
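/* A hedged usage sketch: code elsewhere in this file reads and updates
   descriptor fields only through the accessors defined below, e.g.
   (mirroring gfc_conv_shift_descriptor_lbound further down):

       lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]);
       gfc_conv_descriptor_ubound_set (block, desc, gfc_rank_cst[dim], ubound);
       gfc_conv_descriptor_offset_set (block, desc, offs);  */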
137
138 /* This provides READ-ONLY access to the data field. The field itself
139 doesn't have the proper type. */
140
141 tree
142 gfc_conv_descriptor_data_get (tree desc)
143 {
144 tree field, type, t;
145
146 type = TREE_TYPE (desc);
147 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
148
149 field = TYPE_FIELDS (type);
150 gcc_assert (DATA_FIELD == 0);
151
152 t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
153 field, NULL_TREE);
154 t = fold_convert (GFC_TYPE_ARRAY_DATAPTR_TYPE (type), t);
155
156 return t;
157 }
158
159 /* This provides WRITE access to the data field. */
166
167 void
168 gfc_conv_descriptor_data_set (stmtblock_t *block, tree desc, tree value)
169 {
170 tree field, type, t;
171
172 type = TREE_TYPE (desc);
173 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
174
175 field = TYPE_FIELDS (type);
176 gcc_assert (DATA_FIELD == 0);
177
178 t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
179 field, NULL_TREE);
180 gfc_add_modify (block, t, fold_convert (TREE_TYPE (field), value));
181 }
182
183
184 /* This provides address access to the data field. This should only be
185 used by array allocation, passing this on to the runtime. */
186
187 tree
188 gfc_conv_descriptor_data_addr (tree desc)
189 {
190 tree field, type, t;
191
192 type = TREE_TYPE (desc);
193 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
194
195 field = TYPE_FIELDS (type);
196 gcc_assert (DATA_FIELD == 0);
197
198 t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
199 field, NULL_TREE);
200 return gfc_build_addr_expr (NULL_TREE, t);
201 }
202
203 static tree
204 gfc_conv_descriptor_offset (tree desc)
205 {
206 tree type;
207 tree field;
208
209 type = TREE_TYPE (desc);
210 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
211
212 field = gfc_advance_chain (TYPE_FIELDS (type), OFFSET_FIELD);
213 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
214
215 return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
216 desc, field, NULL_TREE);
217 }
218
219 tree
220 gfc_conv_descriptor_offset_get (tree desc)
221 {
222 return gfc_conv_descriptor_offset (desc);
223 }
224
225 void
226 gfc_conv_descriptor_offset_set (stmtblock_t *block, tree desc,
227 tree value)
228 {
229 tree t = gfc_conv_descriptor_offset (desc);
230 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
231 }
232
233
234 tree
235 gfc_conv_descriptor_dtype (tree desc)
236 {
237 tree field;
238 tree type;
239
240 type = TREE_TYPE (desc);
241 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
242
243 field = gfc_advance_chain (TYPE_FIELDS (type), DTYPE_FIELD);
244 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
245
246 return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
247 desc, field, NULL_TREE);
248 }
249
250 static tree
251 gfc_conv_descriptor_dimension (tree desc, tree dim)
252 {
253 tree field;
254 tree type;
255 tree tmp;
256
257 type = TREE_TYPE (desc);
258 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
259
260 field = gfc_advance_chain (TYPE_FIELDS (type), DIMENSION_FIELD);
261 gcc_assert (field != NULL_TREE
262 && TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE
263 && TREE_CODE (TREE_TYPE (TREE_TYPE (field))) == RECORD_TYPE);
264
265 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
266 desc, field, NULL_TREE);
267 tmp = gfc_build_array_ref (tmp, dim, NULL);
268 return tmp;
269 }
270
271
272 tree
273 gfc_conv_descriptor_token (tree desc)
274 {
275 tree type;
276 tree field;
277
278 type = TREE_TYPE (desc);
279 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
280 gcc_assert (GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ALLOCATABLE);
281 gcc_assert (gfc_option.coarray == GFC_FCOARRAY_LIB);
282 field = gfc_advance_chain (TYPE_FIELDS (type), CAF_TOKEN_FIELD);
283 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == prvoid_type_node);
284
285 return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
286 desc, field, NULL_TREE);
287 }
288
289
290 static tree
291 gfc_conv_descriptor_stride (tree desc, tree dim)
292 {
293 tree tmp;
294 tree field;
295
296 tmp = gfc_conv_descriptor_dimension (desc, dim);
297 field = TYPE_FIELDS (TREE_TYPE (tmp));
298 field = gfc_advance_chain (field, STRIDE_SUBFIELD);
299 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
300
301 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
302 tmp, field, NULL_TREE);
303 return tmp;
304 }
305
306 tree
307 gfc_conv_descriptor_stride_get (tree desc, tree dim)
308 {
309 tree type = TREE_TYPE (desc);
310 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
311 if (integer_zerop (dim)
312 && (GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ALLOCATABLE
313 || GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ASSUMED_SHAPE_CONT
314 || GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_POINTER_CONT))
315 return gfc_index_one_node;
316
317 return gfc_conv_descriptor_stride (desc, dim);
318 }
319
320 void
321 gfc_conv_descriptor_stride_set (stmtblock_t *block, tree desc,
322 tree dim, tree value)
323 {
324 tree t = gfc_conv_descriptor_stride (desc, dim);
325 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
326 }
327
328 static tree
329 gfc_conv_descriptor_lbound (tree desc, tree dim)
330 {
331 tree tmp;
332 tree field;
333
334 tmp = gfc_conv_descriptor_dimension (desc, dim);
335 field = TYPE_FIELDS (TREE_TYPE (tmp));
336 field = gfc_advance_chain (field, LBOUND_SUBFIELD);
337 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
338
339 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
340 tmp, field, NULL_TREE);
341 return tmp;
342 }
343
344 tree
345 gfc_conv_descriptor_lbound_get (tree desc, tree dim)
346 {
347 return gfc_conv_descriptor_lbound (desc, dim);
348 }
349
350 void
351 gfc_conv_descriptor_lbound_set (stmtblock_t *block, tree desc,
352 tree dim, tree value)
353 {
354 tree t = gfc_conv_descriptor_lbound (desc, dim);
355 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
356 }
357
358 static tree
359 gfc_conv_descriptor_ubound (tree desc, tree dim)
360 {
361 tree tmp;
362 tree field;
363
364 tmp = gfc_conv_descriptor_dimension (desc, dim);
365 field = TYPE_FIELDS (TREE_TYPE (tmp));
366 field = gfc_advance_chain (field, UBOUND_SUBFIELD);
367 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
368
369 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
370 tmp, field, NULL_TREE);
371 return tmp;
372 }
373
374 tree
375 gfc_conv_descriptor_ubound_get (tree desc, tree dim)
376 {
377 return gfc_conv_descriptor_ubound (desc, dim);
378 }
379
380 void
381 gfc_conv_descriptor_ubound_set (stmtblock_t *block, tree desc,
382 tree dim, tree value)
383 {
384 tree t = gfc_conv_descriptor_ubound (desc, dim);
385 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
386 }
387
388 /* Build a null array descriptor constructor. */
389
390 tree
391 gfc_build_null_descriptor (tree type)
392 {
393 tree field;
394 tree tmp;
395
396 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
397 gcc_assert (DATA_FIELD == 0);
398 field = TYPE_FIELDS (type);
399
400 /* Set a NULL data pointer. */
401 tmp = build_constructor_single (type, field, null_pointer_node);
402 TREE_CONSTANT (tmp) = 1;
403 /* All other fields are ignored. */
404
405 return tmp;
406 }
407
408
409 /* Modify a descriptor such that the lbound of a given dimension is the value
410 specified. This also updates ubound and offset accordingly. */
411
412 void
413 gfc_conv_shift_descriptor_lbound (stmtblock_t* block, tree desc,
414 int dim, tree new_lbound)
415 {
416 tree offs, ubound, lbound, stride;
417 tree diff, offs_diff;
418
419 new_lbound = fold_convert (gfc_array_index_type, new_lbound);
420
421 offs = gfc_conv_descriptor_offset_get (desc);
422 lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]);
423 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]);
424 stride = gfc_conv_descriptor_stride_get (desc, gfc_rank_cst[dim]);
425
426 /* Get difference (new - old) by which to shift stuff. */
427 diff = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
428 new_lbound, lbound);
429
430 /* Shift ubound and offset accordingly. This has to be done before
431 updating the lbound, as they depend on the lbound expression! */
432 ubound = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
433 ubound, diff);
434 gfc_conv_descriptor_ubound_set (block, desc, gfc_rank_cst[dim], ubound);
435 offs_diff = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
436 diff, stride);
437 offs = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
438 offs, offs_diff);
439 gfc_conv_descriptor_offset_set (block, desc, offs);
440
441 /* Finally set lbound to value we want. */
442 gfc_conv_descriptor_lbound_set (block, desc, gfc_rank_cst[dim], new_lbound);
443 }
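/* A worked example of the arithmetic above (illustrative values only): for a
   rank-1 descriptor with lbound = 1, ubound = 5, stride = 1 and offset = -1,
   shifting the lbound to 0 gives diff = -1, so the new ubound becomes 4 and
   the new offset -1 - (-1 * 1) = 0; element i is still found at
   data[offset + i * stride].  */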
444
445
446 /* Cleanup those #defines. */
447
448 #undef DATA_FIELD
449 #undef OFFSET_FIELD
450 #undef DTYPE_FIELD
451 #undef DIMENSION_FIELD
452 #undef CAF_TOKEN_FIELD
453 #undef STRIDE_SUBFIELD
454 #undef LBOUND_SUBFIELD
455 #undef UBOUND_SUBFIELD
456
457
458 /* Mark a SS chain as used. Flags specifies in which loops the SS is used.
459 flags & 1 = Main loop body.
460 flags & 2 = temp copy loop. */
461
462 void
463 gfc_mark_ss_chain_used (gfc_ss * ss, unsigned flags)
464 {
465 for (; ss != gfc_ss_terminator; ss = ss->next)
466 ss->info->useflags = flags;
467 }
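/* For instance, gfc_trans_array_constructor_subarray below passes 1 because
   only the main loop body is generated there; a caller that also generates
   the temporary-copy loop would pass 3 (both bits set).  This is a sketch
   derived from the flag encoding above, not a list of all callers.  */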
468
469
470 /* Free a gfc_ss chain. */
471
472 void
473 gfc_free_ss_chain (gfc_ss * ss)
474 {
475 gfc_ss *next;
476
477 while (ss != gfc_ss_terminator)
478 {
479 gcc_assert (ss != NULL);
480 next = ss->next;
481 gfc_free_ss (ss);
482 ss = next;
483 }
484 }
485
486
487 static void
488 free_ss_info (gfc_ss_info *ss_info)
489 {
490 ss_info->refcount--;
491 if (ss_info->refcount > 0)
492 return;
493
494 gcc_assert (ss_info->refcount == 0);
495 free (ss_info);
496 }
497
498
499 /* Free a SS. */
500
501 void
502 gfc_free_ss (gfc_ss * ss)
503 {
504 gfc_ss_info *ss_info;
505 int n;
506
507 ss_info = ss->info;
508
509 switch (ss_info->type)
510 {
511 case GFC_SS_SECTION:
512 for (n = 0; n < ss->dimen; n++)
513 {
514 if (ss_info->data.array.subscript[ss->dim[n]])
515 gfc_free_ss_chain (ss_info->data.array.subscript[ss->dim[n]]);
516 }
517 break;
518
519 default:
520 break;
521 }
522
523 free_ss_info (ss_info);
524 free (ss);
525 }
526
527
528 /* Creates and initializes an array type gfc_ss struct. */
529
530 gfc_ss *
531 gfc_get_array_ss (gfc_ss *next, gfc_expr *expr, int dimen, gfc_ss_type type)
532 {
533 gfc_ss *ss;
534 gfc_ss_info *ss_info;
535 int i;
536
537 ss_info = gfc_get_ss_info ();
538 ss_info->refcount++;
539 ss_info->type = type;
540 ss_info->expr = expr;
541
542 ss = gfc_get_ss ();
543 ss->info = ss_info;
544 ss->next = next;
545 ss->dimen = dimen;
546 for (i = 0; i < ss->dimen; i++)
547 ss->dim[i] = i;
548
549 return ss;
550 }
551
552
553 /* Creates and initializes a temporary type gfc_ss struct. */
554
555 gfc_ss *
556 gfc_get_temp_ss (tree type, tree string_length, int dimen)
557 {
558 gfc_ss *ss;
559 gfc_ss_info *ss_info;
560 int i;
561
562 ss_info = gfc_get_ss_info ();
563 ss_info->refcount++;
564 ss_info->type = GFC_SS_TEMP;
565 ss_info->string_length = string_length;
566 ss_info->data.temp.type = type;
567
568 ss = gfc_get_ss ();
569 ss->info = ss_info;
570 ss->next = gfc_ss_terminator;
571 ss->dimen = dimen;
572 for (i = 0; i < ss->dimen; i++)
573 ss->dim[i] = i;
574
575 return ss;
576 }
577
578
579 /* Creates and initializes a scalar type gfc_ss struct. */
580
581 gfc_ss *
582 gfc_get_scalar_ss (gfc_ss *next, gfc_expr *expr)
583 {
584 gfc_ss *ss;
585 gfc_ss_info *ss_info;
586
587 ss_info = gfc_get_ss_info ();
588 ss_info->refcount++;
589 ss_info->type = GFC_SS_SCALAR;
590 ss_info->expr = expr;
591
592 ss = gfc_get_ss ();
593 ss->info = ss_info;
594 ss->next = next;
595
596 return ss;
597 }
598
599
600 /* Free all the SS associated with a loop. */
601
602 void
603 gfc_cleanup_loop (gfc_loopinfo * loop)
604 {
605 gfc_loopinfo *loop_next, **ploop;
606 gfc_ss *ss;
607 gfc_ss *next;
608
609 ss = loop->ss;
610 while (ss != gfc_ss_terminator)
611 {
612 gcc_assert (ss != NULL);
613 next = ss->loop_chain;
614 gfc_free_ss (ss);
615 ss = next;
616 }
617
618 /* Remove reference to self in the parent loop. */
619 if (loop->parent)
620 for (ploop = &loop->parent->nested; *ploop; ploop = &(*ploop)->next)
621 if (*ploop == loop)
622 {
623 *ploop = loop->next;
624 break;
625 }
626
627 /* Free non-freed nested loops. */
628 for (loop = loop->nested; loop; loop = loop_next)
629 {
630 loop_next = loop->next;
631 gfc_cleanup_loop (loop);
632 free (loop);
633 }
634 }
635
636
637 static void
638 set_ss_loop (gfc_ss *ss, gfc_loopinfo *loop)
639 {
640 int n;
641
642 for (; ss != gfc_ss_terminator; ss = ss->next)
643 {
644 ss->loop = loop;
645
646 if (ss->info->type == GFC_SS_SCALAR
647 || ss->info->type == GFC_SS_REFERENCE
648 || ss->info->type == GFC_SS_TEMP)
649 continue;
650
651 for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
652 if (ss->info->data.array.subscript[n] != NULL)
653 set_ss_loop (ss->info->data.array.subscript[n], loop);
654 }
655 }
656
657
658 /* Associate a SS chain with a loop. */
659
660 void
661 gfc_add_ss_to_loop (gfc_loopinfo * loop, gfc_ss * head)
662 {
663 gfc_ss *ss;
664 gfc_loopinfo *nested_loop;
665
666 if (head == gfc_ss_terminator)
667 return;
668
669 set_ss_loop (head, loop);
670
671 ss = head;
672 for (; ss && ss != gfc_ss_terminator; ss = ss->next)
673 {
674 if (ss->nested_ss)
675 {
676 nested_loop = ss->nested_ss->loop;
677
678 /* More than one ss can belong to the same loop. Hence, we add the
679 loop to the chain only if it is different from the previously
680 added one, to avoid duplicate nested loops. */
681 if (nested_loop != loop->nested)
682 {
683 gcc_assert (nested_loop->parent == NULL);
684 nested_loop->parent = loop;
685
686 gcc_assert (nested_loop->next == NULL);
687 nested_loop->next = loop->nested;
688 loop->nested = nested_loop;
689 }
690 else
691 gcc_assert (nested_loop->parent == loop);
692 }
693
694 if (ss->next == gfc_ss_terminator)
695 ss->loop_chain = loop->ss;
696 else
697 ss->loop_chain = ss->next;
698 }
699 gcc_assert (ss == gfc_ss_terminator);
700 loop->ss = head;
701 }
702
703
704 /* Generate an initializer for a static pointer or allocatable array. */
705
706 void
707 gfc_trans_static_array_pointer (gfc_symbol * sym)
708 {
709 tree type;
710
711 gcc_assert (TREE_STATIC (sym->backend_decl));
712 /* Just zero the data member. */
713 type = TREE_TYPE (sym->backend_decl);
714 DECL_INITIAL (sym->backend_decl) = gfc_build_null_descriptor (type);
715 }
716
717
718 /* If the bounds of SE's loop have not yet been set, see if they can be
719 determined from array spec AS, which is the array spec of a called
720 function. MAPPING maps the callee's dummy arguments to the values
721 that the caller is passing. Add any initialization and finalization
722 code to SE. */
723
724 void
725 gfc_set_loop_bounds_from_array_spec (gfc_interface_mapping * mapping,
726 gfc_se * se, gfc_array_spec * as)
727 {
728 int n, dim, total_dim;
729 gfc_se tmpse;
730 gfc_ss *ss;
731 tree lower;
732 tree upper;
733 tree tmp;
734
735 total_dim = 0;
736
737 if (!as || as->type != AS_EXPLICIT)
738 return;
739
740 for (ss = se->ss; ss; ss = ss->parent)
741 {
742 total_dim += ss->loop->dimen;
743 for (n = 0; n < ss->loop->dimen; n++)
744 {
745 /* The bound is known, nothing to do. */
746 if (ss->loop->to[n] != NULL_TREE)
747 continue;
748
749 dim = ss->dim[n];
750 gcc_assert (dim < as->rank);
751 gcc_assert (ss->loop->dimen <= as->rank);
752
753 /* Evaluate the lower bound. */
754 gfc_init_se (&tmpse, NULL);
755 gfc_apply_interface_mapping (mapping, &tmpse, as->lower[dim]);
756 gfc_add_block_to_block (&se->pre, &tmpse.pre);
757 gfc_add_block_to_block (&se->post, &tmpse.post);
758 lower = fold_convert (gfc_array_index_type, tmpse.expr);
759
760 /* ...and the upper bound. */
761 gfc_init_se (&tmpse, NULL);
762 gfc_apply_interface_mapping (mapping, &tmpse, as->upper[dim]);
763 gfc_add_block_to_block (&se->pre, &tmpse.pre);
764 gfc_add_block_to_block (&se->post, &tmpse.post);
765 upper = fold_convert (gfc_array_index_type, tmpse.expr);
766
767 /* Set the upper bound of the loop to UPPER - LOWER. */
768 tmp = fold_build2_loc (input_location, MINUS_EXPR,
769 gfc_array_index_type, upper, lower);
770 tmp = gfc_evaluate_now (tmp, &se->pre);
771 ss->loop->to[n] = tmp;
772 }
773 }
774
775 gcc_assert (total_dim == as->rank);
776 }
777
778
779 /* Generate code to allocate an array temporary, or create a variable to
780 hold the data. If size is NULL, zero the descriptor so that the
781 callee will allocate the array. If DEALLOC is true, also generate code to
782 free the array afterwards.
783
784 If INITIAL is not NULL, it is packed using internal_pack and the result used
785 as data instead of allocating a fresh, uninitialized area of memory.
786
787 Initialization code is added to PRE and finalization code to POST.
788 DYNAMIC is true if the caller may want to extend the array later
789 using realloc. This prevents us from putting the array on the stack. */
790
791 static void
792 gfc_trans_allocate_array_storage (stmtblock_t * pre, stmtblock_t * post,
793 gfc_array_info * info, tree size, tree nelem,
794 tree initial, bool dynamic, bool dealloc)
795 {
796 tree tmp;
797 tree desc;
798 bool onstack;
799
800 desc = info->descriptor;
801 info->offset = gfc_index_zero_node;
802 if (size == NULL_TREE || integer_zerop (size))
803 {
804 /* A callee allocated array. */
805 gfc_conv_descriptor_data_set (pre, desc, null_pointer_node);
806 onstack = FALSE;
807 }
808 else
809 {
810 /* Allocate the temporary. */
811 onstack = !dynamic && initial == NULL_TREE
812 && (gfc_option.flag_stack_arrays
813 || gfc_can_put_var_on_stack (size));
814
815 if (onstack)
816 {
817 /* Make a temporary variable to hold the data. */
818 tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (nelem),
819 nelem, gfc_index_one_node);
820 tmp = gfc_evaluate_now (tmp, pre);
821 tmp = build_range_type (gfc_array_index_type, gfc_index_zero_node,
822 tmp);
823 tmp = build_array_type (gfc_get_element_type (TREE_TYPE (desc)),
824 tmp);
825 tmp = gfc_create_var (tmp, "A");
826 /* If we're here only because of -fstack-arrays we have to
827 emit a DECL_EXPR to make the gimplifier emit alloca calls. */
828 if (!gfc_can_put_var_on_stack (size))
829 gfc_add_expr_to_block (pre,
830 fold_build1_loc (input_location,
831 DECL_EXPR, TREE_TYPE (tmp),
832 tmp));
833 tmp = gfc_build_addr_expr (NULL_TREE, tmp);
834 gfc_conv_descriptor_data_set (pre, desc, tmp);
835 }
836 else
837 {
838 /* Allocate memory to hold the data or call internal_pack. */
839 if (initial == NULL_TREE)
840 {
841 tmp = gfc_call_malloc (pre, NULL, size);
842 tmp = gfc_evaluate_now (tmp, pre);
843 }
844 else
845 {
846 tree packed;
847 tree source_data;
848 tree was_packed;
849 stmtblock_t do_copying;
850
851 tmp = TREE_TYPE (initial); /* Pointer to descriptor. */
852 gcc_assert (TREE_CODE (tmp) == POINTER_TYPE);
853 tmp = TREE_TYPE (tmp); /* The descriptor itself. */
854 tmp = gfc_get_element_type (tmp);
855 gcc_assert (tmp == gfc_get_element_type (TREE_TYPE (desc)));
856 packed = gfc_create_var (build_pointer_type (tmp), "data");
857
858 tmp = build_call_expr_loc (input_location,
859 gfor_fndecl_in_pack, 1, initial);
860 tmp = fold_convert (TREE_TYPE (packed), tmp);
861 gfc_add_modify (pre, packed, tmp);
862
863 tmp = build_fold_indirect_ref_loc (input_location,
864 initial);
865 source_data = gfc_conv_descriptor_data_get (tmp);
866
867 /* internal_pack may return source->data without any allocation
868 or copying if it is already packed. If that's the case, we
869 need to allocate and copy manually. */
870
871 gfc_start_block (&do_copying);
872 tmp = gfc_call_malloc (&do_copying, NULL, size);
873 tmp = fold_convert (TREE_TYPE (packed), tmp);
874 gfc_add_modify (&do_copying, packed, tmp);
875 tmp = gfc_build_memcpy_call (packed, source_data, size);
876 gfc_add_expr_to_block (&do_copying, tmp);
877
878 was_packed = fold_build2_loc (input_location, EQ_EXPR,
879 boolean_type_node, packed,
880 source_data);
881 tmp = gfc_finish_block (&do_copying);
882 tmp = build3_v (COND_EXPR, was_packed, tmp,
883 build_empty_stmt (input_location));
884 gfc_add_expr_to_block (pre, tmp);
885
886 tmp = fold_convert (pvoid_type_node, packed);
887 }
888
889 gfc_conv_descriptor_data_set (pre, desc, tmp);
890 }
891 }
892 info->data = gfc_conv_descriptor_data_get (desc);
893
894 /* The offset is zero because we create temporaries with a zero
895 lower bound. */
896 gfc_conv_descriptor_offset_set (pre, desc, gfc_index_zero_node);
897
898 if (dealloc && !onstack)
899 {
900 /* Free the temporary. */
901 tmp = gfc_conv_descriptor_data_get (desc);
902 tmp = gfc_call_free (fold_convert (pvoid_type_node, tmp));
903 gfc_add_expr_to_block (post, tmp);
904 }
905 }
906
907
908 /* Get the scalarizer array dimension corresponding to actual array dimension
909 given by ARRAY_DIM.
910
911 For example, if SS represents the array ref a(1,:,:,1), it is a
912 bidimensional scalarizer array, and the result would be 0 for ARRAY_DIM=1,
913 and 1 for ARRAY_DIM=2.
914 If SS represents transpose(a(:,1,1,:)), it is again a bidimensional
915 scalarizer array, and the result would be 1 for ARRAY_DIM=0 and 0 for
916 ARRAY_DIM=3.
917 If SS represents sum(a(:,:,:,1), dim=1), it is a 2+1-dimensional scalarizer
918 array. If called on the inner ss, the result would be respectively 0,1,2 for
919 ARRAY_DIM=0,1,2. If called on the outer ss, the result would be 0,1
920 for ARRAY_DIM=1,2. */
921
922 static int
923 get_scalarizer_dim_for_array_dim (gfc_ss *ss, int array_dim)
924 {
925 int array_ref_dim;
926 int n;
927
928 array_ref_dim = 0;
929
930 for (; ss; ss = ss->parent)
931 for (n = 0; n < ss->dimen; n++)
932 if (ss->dim[n] < array_dim)
933 array_ref_dim++;
934
935 return array_ref_dim;
936 }
937
938
939 static gfc_ss *
940 innermost_ss (gfc_ss *ss)
941 {
942 while (ss->nested_ss != NULL)
943 ss = ss->nested_ss;
944
945 return ss;
946 }
947
948
949
950 /* Get the array reference dimension corresponding to the given loop dimension.
951 It is different from the true array dimension given by the dim array in
952 the case of a partial array reference (e.g. a(:,:,1,:)).
953 It is different from the loop dimension in the case of a transposed array.
954 */
955
956 static int
957 get_array_ref_dim_for_loop_dim (gfc_ss *ss, int loop_dim)
958 {
959 return get_scalarizer_dim_for_array_dim (innermost_ss (ss),
960 ss->dim[loop_dim]);
961 }
962
963
964 /* Generate code to create and initialize the descriptor for a temporary
965 array. This is used for both temporaries needed by the scalarizer, and
966 functions returning arrays. Adjusts the loop variables to be
967 zero-based, and calculates the loop bounds for callee allocated arrays.
968 Allocate the array unless it's callee allocated (we have a callee
969 allocated array if 'callee_alloc' is true, or if loop->to[n] is
970 NULL_TREE for any n). Also fills in the descriptor, data and offset
971 fields of info if known. Returns the size of the array, or NULL for a
972 callee allocated array.
973
974 'eltype' == NULL signals that the temporary should be a class object.
975 The 'initial' expression is used to obtain the size of the dynamic
976 type; otherwise the allocation and initialization proceed as for any
977 other expression.
978
979 PRE, POST, INITIAL, DYNAMIC and DEALLOC are as for
980 gfc_trans_allocate_array_storage. */
981
982 tree
983 gfc_trans_create_temp_array (stmtblock_t * pre, stmtblock_t * post, gfc_ss * ss,
984 tree eltype, tree initial, bool dynamic,
985 bool dealloc, bool callee_alloc, locus * where)
986 {
987 gfc_loopinfo *loop;
988 gfc_ss *s;
989 gfc_array_info *info;
990 tree from[GFC_MAX_DIMENSIONS], to[GFC_MAX_DIMENSIONS];
991 tree type;
992 tree desc;
993 tree tmp;
994 tree size;
995 tree nelem;
996 tree cond;
997 tree or_expr;
998 tree class_expr = NULL_TREE;
999 int n, dim, tmp_dim;
1000 int total_dim = 0;
1001
1002 /* This signals a class array for which we need the size of the
1003 dynamic type. Generate an eltype and then the class expression. */
1004 if (eltype == NULL_TREE && initial)
1005 {
1006 if (POINTER_TYPE_P (TREE_TYPE (initial)))
1007 class_expr = build_fold_indirect_ref_loc (input_location, initial);
1008 eltype = TREE_TYPE (class_expr);
1009 eltype = gfc_get_element_type (eltype);
1010 /* Obtain the structure (class) expression. */
1011 class_expr = TREE_OPERAND (class_expr, 0);
1012 gcc_assert (class_expr);
1013 }
1014
1015 memset (from, 0, sizeof (from));
1016 memset (to, 0, sizeof (to));
1017
1018 info = &ss->info->data.array;
1019
1020 gcc_assert (ss->dimen > 0);
1021 gcc_assert (ss->loop->dimen == ss->dimen);
1022
1023 if (gfc_option.warn_array_temp && where)
1024 gfc_warning ("Creating array temporary at %L", where);
1025
1026 /* Set the lower bound to zero. */
1027 for (s = ss; s; s = s->parent)
1028 {
1029 loop = s->loop;
1030
1031 total_dim += loop->dimen;
1032 for (n = 0; n < loop->dimen; n++)
1033 {
1034 dim = s->dim[n];
1035
1036 /* Callee allocated arrays may not have a known bound yet. */
1037 if (loop->to[n])
1038 loop->to[n] = gfc_evaluate_now (
1039 fold_build2_loc (input_location, MINUS_EXPR,
1040 gfc_array_index_type,
1041 loop->to[n], loop->from[n]),
1042 pre);
1043 loop->from[n] = gfc_index_zero_node;
1044
1045 /* We have just changed the loop bounds, we must clear the
1046 corresponding specloop, so that delta calculation is not skipped
1047 later in gfc_set_delta. */
1048 loop->specloop[n] = NULL;
1049
1050 /* We are constructing the temporary's descriptor based on the loop
1051 dimensions. As the dimensions may be accessed in arbitrary order
1052 (think of transpose) the size taken from the n'th loop may not map
1053 to the n'th dimension of the array. We need to reconstruct loop
1054 infos in the right order before using it to set the descriptor
1055 bounds. */
1056 tmp_dim = get_scalarizer_dim_for_array_dim (ss, dim);
1057 from[tmp_dim] = loop->from[n];
1058 to[tmp_dim] = loop->to[n];
1059
1060 info->delta[dim] = gfc_index_zero_node;
1061 info->start[dim] = gfc_index_zero_node;
1062 info->end[dim] = gfc_index_zero_node;
1063 info->stride[dim] = gfc_index_one_node;
1064 }
1065 }
1066
1067 /* Initialize the descriptor. */
1068 type =
1069 gfc_get_array_type_bounds (eltype, total_dim, 0, from, to, 1,
1070 GFC_ARRAY_UNKNOWN, true);
1071 desc = gfc_create_var (type, "atmp");
1072 GFC_DECL_PACKED_ARRAY (desc) = 1;
1073
1074 info->descriptor = desc;
1075 size = gfc_index_one_node;
1076
1077 /* Fill in the array dtype. */
1078 tmp = gfc_conv_descriptor_dtype (desc);
1079 gfc_add_modify (pre, tmp, gfc_get_dtype (TREE_TYPE (desc)));
1080
1081 /*
1082 Fill in the bounds and stride. This is a packed array, so:
1083
1084 size = 1;
1085 for (n = 0; n < rank; n++)
1086 {
1087 stride[n] = size;
1088 delta = ubound[n] + 1 - lbound[n];
1089 size = size * delta;
1090 }
1091 size = size * sizeof(element);
1092 */
1093
1094 or_expr = NULL_TREE;
1095
1096 /* If there is at least one null loop->to[n], it is a callee allocated
1097 array. */
1098 for (n = 0; n < total_dim; n++)
1099 if (to[n] == NULL_TREE)
1100 {
1101 size = NULL_TREE;
1102 break;
1103 }
1104
1105 if (size == NULL_TREE)
1106 for (s = ss; s; s = s->parent)
1107 for (n = 0; n < s->loop->dimen; n++)
1108 {
1109 dim = get_scalarizer_dim_for_array_dim (ss, s->dim[n]);
1110
1111 /* For a callee allocated array express the loop bounds in terms
1112 of the descriptor fields. */
1113 tmp = fold_build2_loc (input_location,
1114 MINUS_EXPR, gfc_array_index_type,
1115 gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]),
1116 gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]));
1117 s->loop->to[n] = tmp;
1118 }
1119 else
1120 {
1121 for (n = 0; n < total_dim; n++)
1122 {
1123 /* Store the stride and bound components in the descriptor. */
1124 gfc_conv_descriptor_stride_set (pre, desc, gfc_rank_cst[n], size);
1125
1126 gfc_conv_descriptor_lbound_set (pre, desc, gfc_rank_cst[n],
1127 gfc_index_zero_node);
1128
1129 gfc_conv_descriptor_ubound_set (pre, desc, gfc_rank_cst[n], to[n]);
1130
1131 tmp = fold_build2_loc (input_location, PLUS_EXPR,
1132 gfc_array_index_type,
1133 to[n], gfc_index_one_node);
1134
1135 /* Check whether the size for this dimension is negative. */
1136 cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
1137 tmp, gfc_index_zero_node);
1138 cond = gfc_evaluate_now (cond, pre);
1139
1140 if (n == 0)
1141 or_expr = cond;
1142 else
1143 or_expr = fold_build2_loc (input_location, TRUTH_OR_EXPR,
1144 boolean_type_node, or_expr, cond);
1145
1146 size = fold_build2_loc (input_location, MULT_EXPR,
1147 gfc_array_index_type, size, tmp);
1148 size = gfc_evaluate_now (size, pre);
1149 }
1150 }
1151
1152 /* Get the size of the array. */
1153 if (size && !callee_alloc)
1154 {
1155 tree elemsize;
1156 /* If or_expr is true, then the extent in at least one
1157 dimension is zero and the size is set to zero. */
1158 size = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type,
1159 or_expr, gfc_index_zero_node, size);
1160
1161 nelem = size;
1162 if (class_expr == NULL_TREE)
1163 elemsize = fold_convert (gfc_array_index_type,
1164 TYPE_SIZE_UNIT (gfc_get_element_type (type)));
1165 else
1166 elemsize = gfc_vtable_size_get (class_expr);
1167
1168 size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
1169 size, elemsize);
1170 }
1171 else
1172 {
1173 nelem = size;
1174 size = NULL_TREE;
1175 }
1176
1177 gfc_trans_allocate_array_storage (pre, post, info, size, nelem, initial,
1178 dynamic, dealloc);
1179
1180 while (ss->parent)
1181 ss = ss->parent;
1182
1183 if (ss->dimen > ss->loop->temp_dim)
1184 ss->loop->temp_dim = ss->dimen;
1185
1186 return size;
1187 }
1188
1189
1190 /* Return the number of iterations in a loop that starts at START,
1191 ends at END, and has step STEP. */
1192
1193 static tree
1194 gfc_get_iteration_count (tree start, tree end, tree step)
1195 {
1196 tree tmp;
1197 tree type;
1198
1199 type = TREE_TYPE (step);
1200 tmp = fold_build2_loc (input_location, MINUS_EXPR, type, end, start);
1201 tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR, type, tmp, step);
1202 tmp = fold_build2_loc (input_location, PLUS_EXPR, type, tmp,
1203 build_int_cst (type, 1));
1204 tmp = fold_build2_loc (input_location, MAX_EXPR, type, tmp,
1205 build_int_cst (type, 0));
1206 return fold_convert (gfc_array_index_type, tmp);
1207 }
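/* For example, with START = 1, END = 10 and STEP = 3 the expression built
   above folds to MAX (FLOOR ((10 - 1) / 3) + 1, 0) = 4, matching the
   iterations 1, 4, 7 and 10.  */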
1208
1209
1210 /* Extend the data in array DESC by EXTRA elements. */
1211
1212 static void
1213 gfc_grow_array (stmtblock_t * pblock, tree desc, tree extra)
1214 {
1215 tree arg0, arg1;
1216 tree tmp;
1217 tree size;
1218 tree ubound;
1219
1220 if (integer_zerop (extra))
1221 return;
1222
1223 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[0]);
1224
1225 /* Add EXTRA to the upper bound. */
1226 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
1227 ubound, extra);
1228 gfc_conv_descriptor_ubound_set (pblock, desc, gfc_rank_cst[0], tmp);
1229
1230 /* Get the value of the current data pointer. */
1231 arg0 = gfc_conv_descriptor_data_get (desc);
1232
1233 /* Calculate the new array size. */
1234 size = TYPE_SIZE_UNIT (gfc_get_element_type (TREE_TYPE (desc)));
1235 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
1236 ubound, gfc_index_one_node);
1237 arg1 = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
1238 fold_convert (size_type_node, tmp),
1239 fold_convert (size_type_node, size));
1240
1241 /* Call the realloc() function. */
1242 tmp = gfc_call_realloc (pblock, arg0, arg1);
1243 gfc_conv_descriptor_data_set (pblock, desc, tmp);
1244 }
1245
1246
1247 /* Return true if the bounds of iterator I can only be determined
1248 at run time. */
1249
1250 static inline bool
1251 gfc_iterator_has_dynamic_bounds (gfc_iterator * i)
1252 {
1253 return (i->start->expr_type != EXPR_CONSTANT
1254 || i->end->expr_type != EXPR_CONSTANT
1255 || i->step->expr_type != EXPR_CONSTANT);
1256 }
1257
1258
1259 /* Split the size of constructor element EXPR into the sum of two terms,
1260 one of which can be determined at compile time and one of which must
1261 be calculated at run time. Set *SIZE to the former and return true
1262 if the latter might be nonzero. */
1263
1264 static bool
1265 gfc_get_array_constructor_element_size (mpz_t * size, gfc_expr * expr)
1266 {
1267 if (expr->expr_type == EXPR_ARRAY)
1268 return gfc_get_array_constructor_size (size, expr->value.constructor);
1269 else if (expr->rank > 0)
1270 {
1271 /* Calculate everything at run time. */
1272 mpz_set_ui (*size, 0);
1273 return true;
1274 }
1275 else
1276 {
1277 /* A single element. */
1278 mpz_set_ui (*size, 1);
1279 return false;
1280 }
1281 }
1282
1283
1284 /* Like gfc_get_array_constructor_element_size, but applied to the whole
1285 of array constructor C. */
1286
1287 static bool
1288 gfc_get_array_constructor_size (mpz_t * size, gfc_constructor_base base)
1289 {
1290 gfc_constructor *c;
1291 gfc_iterator *i;
1292 mpz_t val;
1293 mpz_t len;
1294 bool dynamic;
1295
1296 mpz_set_ui (*size, 0);
1297 mpz_init (len);
1298 mpz_init (val);
1299
1300 dynamic = false;
1301 for (c = gfc_constructor_first (base); c; c = gfc_constructor_next (c))
1302 {
1303 i = c->iterator;
1304 if (i && gfc_iterator_has_dynamic_bounds (i))
1305 dynamic = true;
1306 else
1307 {
1308 dynamic |= gfc_get_array_constructor_element_size (&len, c->expr);
1309 if (i)
1310 {
1311 /* Multiply the static part of the element size by the
1312 number of iterations. */
1313 mpz_sub (val, i->end->value.integer, i->start->value.integer);
1314 mpz_fdiv_q (val, val, i->step->value.integer);
1315 mpz_add_ui (val, val, 1);
1316 if (mpz_sgn (val) > 0)
1317 mpz_mul (len, len, val);
1318 else
1319 mpz_set_ui (len, 0);
1320 }
1321 mpz_add (*size, *size, len);
1322 }
1323 }
1324 mpz_clear (len);
1325 mpz_clear (val);
1326 return dynamic;
1327 }
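/* As a concrete illustration: for the constructor (/ 1, 2, (i, i = 1, n) /)
   the two leading constants give a static size of 2, while the implied-do
   with the run-time bound N makes the function return true, so the remaining
   storage has to be obtained dynamically (see gfc_grow_array above).  */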
1328
1329
1330 /* Make sure offset is a variable. */
1331
1332 static void
1333 gfc_put_offset_into_var (stmtblock_t * pblock, tree * poffset,
1334 tree * offsetvar)
1335 {
1336 /* We should have already created the offset variable. We cannot
1337 create it here because we may be in an inner scope. */
1338 gcc_assert (*offsetvar != NULL_TREE);
1339 gfc_add_modify (pblock, *offsetvar, *poffset);
1340 *poffset = *offsetvar;
1341 TREE_USED (*offsetvar) = 1;
1342 }
1343
1344
1345 /* Variables needed for bounds-checking. */
1346 static bool first_len;
1347 static tree first_len_val;
1348 static bool typespec_chararray_ctor;
1349
1350 static void
1351 gfc_trans_array_ctor_element (stmtblock_t * pblock, tree desc,
1352 tree offset, gfc_se * se, gfc_expr * expr)
1353 {
1354 tree tmp;
1355
1356 gfc_conv_expr (se, expr);
1357
1358 /* Store the value. */
1359 tmp = build_fold_indirect_ref_loc (input_location,
1360 gfc_conv_descriptor_data_get (desc));
1361 tmp = gfc_build_array_ref (tmp, offset, NULL);
1362
1363 if (expr->ts.type == BT_CHARACTER)
1364 {
1365 int i = gfc_validate_kind (BT_CHARACTER, expr->ts.kind, false);
1366 tree esize;
1367
1368 esize = size_in_bytes (gfc_get_element_type (TREE_TYPE (desc)));
1369 esize = fold_convert (gfc_charlen_type_node, esize);
1370 esize = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
1371 gfc_charlen_type_node, esize,
1372 build_int_cst (gfc_charlen_type_node,
1373 gfc_character_kinds[i].bit_size / 8));
1374
1375 gfc_conv_string_parameter (se);
1376 if (POINTER_TYPE_P (TREE_TYPE (tmp)))
1377 {
1378 /* The temporary is an array of pointers. */
1379 se->expr = fold_convert (TREE_TYPE (tmp), se->expr);
1380 gfc_add_modify (&se->pre, tmp, se->expr);
1381 }
1382 else
1383 {
1384 /* The temporary is an array of string values. */
1385 tmp = gfc_build_addr_expr (gfc_get_pchar_type (expr->ts.kind), tmp);
1386 /* We know the temporary and the value will be the same length,
1387 so can use memcpy. */
1388 gfc_trans_string_copy (&se->pre, esize, tmp, expr->ts.kind,
1389 se->string_length, se->expr, expr->ts.kind);
1390 }
1391 if ((gfc_option.rtcheck & GFC_RTCHECK_BOUNDS) && !typespec_chararray_ctor)
1392 {
1393 if (first_len)
1394 {
1395 gfc_add_modify (&se->pre, first_len_val,
1396 se->string_length);
1397 first_len = false;
1398 }
1399 else
1400 {
1401 /* Verify that all constructor elements are of the same
1402 length. */
1403 tree cond = fold_build2_loc (input_location, NE_EXPR,
1404 boolean_type_node, first_len_val,
1405 se->string_length);
1406 gfc_trans_runtime_check
1407 (true, false, cond, &se->pre, &expr->where,
1408 "Different CHARACTER lengths (%ld/%ld) in array constructor",
1409 fold_convert (long_integer_type_node, first_len_val),
1410 fold_convert (long_integer_type_node, se->string_length));
1411 }
1412 }
1413 }
1414 else
1415 {
1416 /* TODO: Should the frontend already have done this conversion? */
1417 se->expr = fold_convert (TREE_TYPE (tmp), se->expr);
1418 gfc_add_modify (&se->pre, tmp, se->expr);
1419 }
1420
1421 gfc_add_block_to_block (pblock, &se->pre);
1422 gfc_add_block_to_block (pblock, &se->post);
1423 }
1424
1425
1426 /* Add the contents of an array to the constructor. DYNAMIC is as for
1427 gfc_trans_array_constructor_value. */
1428
1429 static void
1430 gfc_trans_array_constructor_subarray (stmtblock_t * pblock,
1431 tree type ATTRIBUTE_UNUSED,
1432 tree desc, gfc_expr * expr,
1433 tree * poffset, tree * offsetvar,
1434 bool dynamic)
1435 {
1436 gfc_se se;
1437 gfc_ss *ss;
1438 gfc_loopinfo loop;
1439 stmtblock_t body;
1440 tree tmp;
1441 tree size;
1442 int n;
1443
1444 /* We need this to be a variable so we can increment it. */
1445 gfc_put_offset_into_var (pblock, poffset, offsetvar);
1446
1447 gfc_init_se (&se, NULL);
1448
1449 /* Walk the array expression. */
1450 ss = gfc_walk_expr (expr);
1451 gcc_assert (ss != gfc_ss_terminator);
1452
1453 /* Initialize the scalarizer. */
1454 gfc_init_loopinfo (&loop);
1455 gfc_add_ss_to_loop (&loop, ss);
1456
1457 /* Initialize the loop. */
1458 gfc_conv_ss_startstride (&loop);
1459 gfc_conv_loop_setup (&loop, &expr->where);
1460
1461 /* Make sure the constructed array has room for the new data. */
1462 if (dynamic)
1463 {
1464 /* Set SIZE to the total number of elements in the subarray. */
1465 size = gfc_index_one_node;
1466 for (n = 0; n < loop.dimen; n++)
1467 {
1468 tmp = gfc_get_iteration_count (loop.from[n], loop.to[n],
1469 gfc_index_one_node);
1470 size = fold_build2_loc (input_location, MULT_EXPR,
1471 gfc_array_index_type, size, tmp);
1472 }
1473
1474 /* Grow the constructed array by SIZE elements. */
1475 gfc_grow_array (&loop.pre, desc, size);
1476 }
1477
1478 /* Make the loop body. */
1479 gfc_mark_ss_chain_used (ss, 1);
1480 gfc_start_scalarized_body (&loop, &body);
1481 gfc_copy_loopinfo_to_se (&se, &loop);
1482 se.ss = ss;
1483
1484 gfc_trans_array_ctor_element (&body, desc, *poffset, &se, expr);
1485 gcc_assert (se.ss == gfc_ss_terminator);
1486
1487 /* Increment the offset. */
1488 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
1489 *poffset, gfc_index_one_node);
1490 gfc_add_modify (&body, *poffset, tmp);
1491
1492 /* Finish the loop. */
1493 gfc_trans_scalarizing_loops (&loop, &body);
1494 gfc_add_block_to_block (&loop.pre, &loop.post);
1495 tmp = gfc_finish_block (&loop.pre);
1496 gfc_add_expr_to_block (pblock, tmp);
1497
1498 gfc_cleanup_loop (&loop);
1499 }
1500
1501
1502 /* Assign the values to the elements of an array constructor. DYNAMIC
1503 is true if descriptor DESC only contains enough data for the static
1504 size calculated by gfc_get_array_constructor_size. When true, memory
1505 for the dynamic parts must be allocated using realloc. */
1506
1507 static void
1508 gfc_trans_array_constructor_value (stmtblock_t * pblock, tree type,
1509 tree desc, gfc_constructor_base base,
1510 tree * poffset, tree * offsetvar,
1511 bool dynamic)
1512 {
1513 tree tmp;
1514 stmtblock_t body;
1515 gfc_se se;
1516 mpz_t size;
1517 gfc_constructor *c;
1518
1519 tree shadow_loopvar = NULL_TREE;
1520 gfc_saved_var saved_loopvar;
1521
1522 mpz_init (size);
1523 for (c = gfc_constructor_first (base); c; c = gfc_constructor_next (c))
1524 {
1525 /* If this is an iterator or an array, the offset must be a variable. */
1526 if ((c->iterator || c->expr->rank > 0) && INTEGER_CST_P (*poffset))
1527 gfc_put_offset_into_var (pblock, poffset, offsetvar);
1528
1529 /* Shadowing the iterator avoids changing its value and saves us from
1530 keeping track of it. Further, it makes sure that there's always a
1531 backend-decl for the symbol, even if there wasn't one before,
1532 e.g. in the case of an iterator that appears in a specification
1533 expression in an interface mapping. */
1534 if (c->iterator)
1535 {
1536 gfc_symbol *sym = c->iterator->var->symtree->n.sym;
1537 tree type = gfc_typenode_for_spec (&sym->ts);
1538
1539 shadow_loopvar = gfc_create_var (type, "shadow_loopvar");
1540 gfc_shadow_sym (sym, shadow_loopvar, &saved_loopvar);
1541 }
1542
1543 gfc_start_block (&body);
1544
1545 if (c->expr->expr_type == EXPR_ARRAY)
1546 {
1547 /* Array constructors can be nested. */
1548 gfc_trans_array_constructor_value (&body, type, desc,
1549 c->expr->value.constructor,
1550 poffset, offsetvar, dynamic);
1551 }
1552 else if (c->expr->rank > 0)
1553 {
1554 gfc_trans_array_constructor_subarray (&body, type, desc, c->expr,
1555 poffset, offsetvar, dynamic);
1556 }
1557 else
1558 {
1559 /* This code really upsets the gimplifier so don't bother for now. */
1560 gfc_constructor *p;
1561 HOST_WIDE_INT n;
1562 HOST_WIDE_INT size;
1563
1564 p = c;
1565 n = 0;
1566 while (p && !(p->iterator || p->expr->expr_type != EXPR_CONSTANT))
1567 {
1568 p = gfc_constructor_next (p);
1569 n++;
1570 }
1571 if (n < 4)
1572 {
1573 /* Scalar values. */
1574 gfc_init_se (&se, NULL);
1575 gfc_trans_array_ctor_element (&body, desc, *poffset,
1576 &se, c->expr);
1577
1578 *poffset = fold_build2_loc (input_location, PLUS_EXPR,
1579 gfc_array_index_type,
1580 *poffset, gfc_index_one_node);
1581 }
1582 else
1583 {
1584 /* Collect multiple scalar constants into a constructor. */
1585 VEC(constructor_elt,gc) *v = NULL;
1586 tree init;
1587 tree bound;
1588 tree tmptype;
1589 HOST_WIDE_INT idx = 0;
1590
1591 p = c;
1592 /* Count the number of consecutive scalar constants. */
1593 while (p && !(p->iterator
1594 || p->expr->expr_type != EXPR_CONSTANT))
1595 {
1596 gfc_init_se (&se, NULL);
1597 gfc_conv_constant (&se, p->expr);
1598
1599 if (c->expr->ts.type != BT_CHARACTER)
1600 se.expr = fold_convert (type, se.expr);
1601 /* For constant character array constructors we build
1602 an array of pointers. */
1603 else if (POINTER_TYPE_P (type))
1604 se.expr = gfc_build_addr_expr
1605 (gfc_get_pchar_type (p->expr->ts.kind),
1606 se.expr);
1607
1608 CONSTRUCTOR_APPEND_ELT (v,
1609 build_int_cst (gfc_array_index_type,
1610 idx++),
1611 se.expr);
1612 c = p;
1613 p = gfc_constructor_next (p);
1614 }
1615
1616 bound = size_int (n - 1);
1617 /* Create an array type to hold them. */
1618 tmptype = build_range_type (gfc_array_index_type,
1619 gfc_index_zero_node, bound);
1620 tmptype = build_array_type (type, tmptype);
1621
1622 init = build_constructor (tmptype, v);
1623 TREE_CONSTANT (init) = 1;
1624 TREE_STATIC (init) = 1;
1625 /* Create a static variable to hold the data. */
1626 tmp = gfc_create_var (tmptype, "data");
1627 TREE_STATIC (tmp) = 1;
1628 TREE_CONSTANT (tmp) = 1;
1629 TREE_READONLY (tmp) = 1;
1630 DECL_INITIAL (tmp) = init;
1631 init = tmp;
1632
1633 /* Use BUILTIN_MEMCPY to assign the values. */
1634 tmp = gfc_conv_descriptor_data_get (desc);
1635 tmp = build_fold_indirect_ref_loc (input_location,
1636 tmp);
1637 tmp = gfc_build_array_ref (tmp, *poffset, NULL);
1638 tmp = gfc_build_addr_expr (NULL_TREE, tmp);
1639 init = gfc_build_addr_expr (NULL_TREE, init);
1640
1641 size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (type));
1642 bound = build_int_cst (size_type_node, n * size);
1643 tmp = build_call_expr_loc (input_location,
1644 builtin_decl_explicit (BUILT_IN_MEMCPY),
1645 3, tmp, init, bound);
1646 gfc_add_expr_to_block (&body, tmp);
1647
1648 *poffset = fold_build2_loc (input_location, PLUS_EXPR,
1649 gfc_array_index_type, *poffset,
1650 build_int_cst (gfc_array_index_type, n));
1651 }
1652 if (!INTEGER_CST_P (*poffset))
1653 {
1654 gfc_add_modify (&body, *offsetvar, *poffset);
1655 *poffset = *offsetvar;
1656 }
1657 }
1658
1659 /* The frontend should already have done any expansions
1660 at compile-time. */
1661 if (!c->iterator)
1662 {
1663 /* Pass the code as is. */
1664 tmp = gfc_finish_block (&body);
1665 gfc_add_expr_to_block (pblock, tmp);
1666 }
1667 else
1668 {
1669 /* Build the implied do-loop. */
1670 stmtblock_t implied_do_block;
1671 tree cond;
1672 tree end;
1673 tree step;
1674 tree exit_label;
1675 tree loopbody;
1676 tree tmp2;
1677
1678 loopbody = gfc_finish_block (&body);
1679
1680 /* Create a new block that holds the implied-do loop. A temporary
1681 loop-variable is used. */
1682 gfc_start_block (&implied_do_block);
1683
1684 /* Initialize the loop. */
1685 gfc_init_se (&se, NULL);
1686 gfc_conv_expr_val (&se, c->iterator->start);
1687 gfc_add_block_to_block (&implied_do_block, &se.pre);
1688 gfc_add_modify (&implied_do_block, shadow_loopvar, se.expr);
1689
1690 gfc_init_se (&se, NULL);
1691 gfc_conv_expr_val (&se, c->iterator->end);
1692 gfc_add_block_to_block (&implied_do_block, &se.pre);
1693 end = gfc_evaluate_now (se.expr, &implied_do_block);
1694
1695 gfc_init_se (&se, NULL);
1696 gfc_conv_expr_val (&se, c->iterator->step);
1697 gfc_add_block_to_block (&implied_do_block, &se.pre);
1698 step = gfc_evaluate_now (se.expr, &implied_do_block);
1699
1700 /* If this array expands dynamically, and the number of iterations
1701 is not constant, we won't have allocated space for the static
1702 part of C->EXPR's size. Do that now. */
1703 if (dynamic && gfc_iterator_has_dynamic_bounds (c->iterator))
1704 {
1705 /* Get the number of iterations. */
1706 tmp = gfc_get_iteration_count (shadow_loopvar, end, step);
1707
1708 /* Get the static part of C->EXPR's size. */
1709 gfc_get_array_constructor_element_size (&size, c->expr);
1710 tmp2 = gfc_conv_mpz_to_tree (size, gfc_index_integer_kind);
1711
1712 /* Grow the array by TMP * TMP2 elements. */
1713 tmp = fold_build2_loc (input_location, MULT_EXPR,
1714 gfc_array_index_type, tmp, tmp2);
1715 gfc_grow_array (&implied_do_block, desc, tmp);
1716 }
1717
1718 /* Generate the loop body. */
1719 exit_label = gfc_build_label_decl (NULL_TREE);
1720 gfc_start_block (&body);
1721
1722 /* Generate the exit condition. Depending on the sign of
1723 the step variable we have to generate the correct
1724 comparison. */
1725 tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
1726 step, build_int_cst (TREE_TYPE (step), 0));
1727 cond = fold_build3_loc (input_location, COND_EXPR,
1728 boolean_type_node, tmp,
1729 fold_build2_loc (input_location, GT_EXPR,
1730 boolean_type_node, shadow_loopvar, end),
1731 fold_build2_loc (input_location, LT_EXPR,
1732 boolean_type_node, shadow_loopvar, end));
1733 tmp = build1_v (GOTO_EXPR, exit_label);
1734 TREE_USED (exit_label) = 1;
1735 tmp = build3_v (COND_EXPR, cond, tmp,
1736 build_empty_stmt (input_location));
1737 gfc_add_expr_to_block (&body, tmp);
1738
1739 /* The main loop body. */
1740 gfc_add_expr_to_block (&body, loopbody);
1741
1742 /* Increase loop variable by step. */
1743 tmp = fold_build2_loc (input_location, PLUS_EXPR,
1744 TREE_TYPE (shadow_loopvar), shadow_loopvar,
1745 step);
1746 gfc_add_modify (&body, shadow_loopvar, tmp);
1747
1748 /* Finish the loop. */
1749 tmp = gfc_finish_block (&body);
1750 tmp = build1_v (LOOP_EXPR, tmp);
1751 gfc_add_expr_to_block (&implied_do_block, tmp);
1752
1753 /* Add the exit label. */
1754 tmp = build1_v (LABEL_EXPR, exit_label);
1755 gfc_add_expr_to_block (&implied_do_block, tmp);
1756
1757 /* Finish the implied-do loop. */
1758 tmp = gfc_finish_block (&implied_do_block);
1759 gfc_add_expr_to_block (pblock, tmp);
1760
1761 gfc_restore_sym (c->iterator->var->symtree->n.sym, &saved_loopvar);
1762 }
1763 }
1764 mpz_clear (size);
1765 }
1766
1767
1768 /* A catch-all to obtain the string length for anything that is not a
1769 substring of non-constant length, a constant, an array or a variable. */
1770
1771 static void
1772 get_array_ctor_all_strlen (stmtblock_t *block, gfc_expr *e, tree *len)
1773 {
1774 gfc_se se;
1775 gfc_ss *ss;
1776
1777 /* Don't bother if we already know the length is a constant. */
1778 if (*len && INTEGER_CST_P (*len))
1779 return;
1780
1781 if (!e->ref && e->ts.u.cl && e->ts.u.cl->length
1782 && e->ts.u.cl->length->expr_type == EXPR_CONSTANT)
1783 {
1784 /* This is easy. */
1785 gfc_conv_const_charlen (e->ts.u.cl);
1786 *len = e->ts.u.cl->backend_decl;
1787 }
1788 else
1789 {
1790 /* Otherwise, be brutal even if inefficient. */
1791 ss = gfc_walk_expr (e);
1792 gfc_init_se (&se, NULL);
1793
1794 /* No function call, in case of side effects. */
1795 se.no_function_call = 1;
1796 if (ss == gfc_ss_terminator)
1797 gfc_conv_expr (&se, e);
1798 else
1799 gfc_conv_expr_descriptor (&se, e, ss);
1800
1801 /* Fix the value. */
1802 *len = gfc_evaluate_now (se.string_length, &se.pre);
1803
1804 gfc_add_block_to_block (block, &se.pre);
1805 gfc_add_block_to_block (block, &se.post);
1806
1807 e->ts.u.cl->backend_decl = *len;
1808 }
1809 }
1810
1811
1812 /* Figure out the string length of a variable reference expression.
1813 Used by get_array_ctor_strlen. */
1814
1815 static void
1816 get_array_ctor_var_strlen (stmtblock_t *block, gfc_expr * expr, tree * len)
1817 {
1818 gfc_ref *ref;
1819 gfc_typespec *ts;
1820 mpz_t char_len;
1821
1822 /* Don't bother if we already know the length is a constant. */
1823 if (*len && INTEGER_CST_P (*len))
1824 return;
1825
1826 ts = &expr->symtree->n.sym->ts;
1827 for (ref = expr->ref; ref; ref = ref->next)
1828 {
1829 switch (ref->type)
1830 {
1831 case REF_ARRAY:
1832 /* Array references don't change the string length. */
1833 break;
1834
1835 case REF_COMPONENT:
1836 /* Use the length of the component. */
1837 ts = &ref->u.c.component->ts;
1838 break;
1839
1840 case REF_SUBSTRING:
1841 if (ref->u.ss.start->expr_type != EXPR_CONSTANT
1842 || ref->u.ss.end->expr_type != EXPR_CONSTANT)
1843 {
1844 /* Note that this might evaluate expr. */
1845 get_array_ctor_all_strlen (block, expr, len);
1846 return;
1847 }
1848 mpz_init_set_ui (char_len, 1);
1849 mpz_add (char_len, char_len, ref->u.ss.end->value.integer);
1850 mpz_sub (char_len, char_len, ref->u.ss.start->value.integer);
1851 *len = gfc_conv_mpz_to_tree (char_len, gfc_default_integer_kind);
1852 *len = convert (gfc_charlen_type_node, *len);
1853 mpz_clear (char_len);
1854 return;
1855
1856 default:
1857 gcc_unreachable ();
1858 }
1859 }
1860
1861 *len = ts->u.cl->backend_decl;
1862 }
1863
1864
1865 /* Figure out the string length of a character array constructor.
1866 If len is NULL, don't calculate the length; this happens for recursive calls
1867 when a sub-array-constructor is an element but not at the first position,
1868 in which case we are not interested in the length.
1869 Returns TRUE if all elements are character constants. */
1870
1871 bool
1872 get_array_ctor_strlen (stmtblock_t *block, gfc_constructor_base base, tree * len)
1873 {
1874 gfc_constructor *c;
1875 bool is_const;
1876
1877 is_const = TRUE;
1878
1879 if (gfc_constructor_first (base) == NULL)
1880 {
1881 if (len)
1882 *len = build_int_cstu (gfc_charlen_type_node, 0);
1883 return is_const;
1884 }
1885
1886 /* Loop over all constructor elements to find out is_const, but in len we
1887 want to store the length of the first, not the last, element. We can
1888 of course exit the loop as soon as is_const is found to be false. */
1889 for (c = gfc_constructor_first (base);
1890 c && is_const; c = gfc_constructor_next (c))
1891 {
1892 switch (c->expr->expr_type)
1893 {
1894 case EXPR_CONSTANT:
1895 if (len && !(*len && INTEGER_CST_P (*len)))
1896 *len = build_int_cstu (gfc_charlen_type_node,
1897 c->expr->value.character.length);
1898 break;
1899
1900 case EXPR_ARRAY:
1901 if (!get_array_ctor_strlen (block, c->expr->value.constructor, len))
1902 is_const = false;
1903 break;
1904
1905 case EXPR_VARIABLE:
1906 is_const = false;
1907 if (len)
1908 get_array_ctor_var_strlen (block, c->expr, len);
1909 break;
1910
1911 default:
1912 is_const = false;
1913 if (len)
1914 get_array_ctor_all_strlen (block, c->expr, len);
1915 break;
1916 }
1917
1918 /* After the first iteration, we don't want the length modified. */
1919 len = NULL;
1920 }
1921
1922 return is_const;
1923 }
1924
1925 /* Check whether the array constructor C consists entirely of constant
1926 elements, and if so return the number of those elements; otherwise
1927 return zero. Note that an empty or NULL array constructor returns zero. */
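/* For example, the constructor (/ 1, 2, 3 /) consists of three constant
   scalar elements and yields 3, whereas a constructor containing an
   iterator, such as (/ (i, i=1,3) /), or any non-constant element yields
   zero.  */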
1928
1929 unsigned HOST_WIDE_INT
1930 gfc_constant_array_constructor_p (gfc_constructor_base base)
1931 {
1932 unsigned HOST_WIDE_INT nelem = 0;
1933
1934 gfc_constructor *c = gfc_constructor_first (base);
1935 while (c)
1936 {
1937 if (c->iterator
1938 || c->expr->rank > 0
1939 || c->expr->expr_type != EXPR_CONSTANT)
1940 return 0;
1941 c = gfc_constructor_next (c);
1942 nelem++;
1943 }
1944 return nelem;
1945 }
1946
1947
1948 /* Given EXPR, the constant array constructor specified by an EXPR_ARRAY,
1949 and the tree type of its elements, TYPE, return a static constant
1950 variable that is compile-time initialized. */
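/* For instance, for the constructor (/ 1, 2, 3 /) this builds a static,
   read-only rank-1 array variable whose DECL_INITIAL holds the three
   integer constants, so no run-time construction code is needed.  */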
1951
1952 tree
1953 gfc_build_constant_array_constructor (gfc_expr * expr, tree type)
1954 {
1955 tree tmptype, init, tmp;
1956 HOST_WIDE_INT nelem;
1957 gfc_constructor *c;
1958 gfc_array_spec as;
1959 gfc_se se;
1960 int i;
1961 VEC(constructor_elt,gc) *v = NULL;
1962
1963 /* First traverse the constructor list, converting the constants
1964 to tree to build an initializer. */
1965 nelem = 0;
1966 c = gfc_constructor_first (expr->value.constructor);
1967 while (c)
1968 {
1969 gfc_init_se (&se, NULL);
1970 gfc_conv_constant (&se, c->expr);
1971 if (c->expr->ts.type != BT_CHARACTER)
1972 se.expr = fold_convert (type, se.expr);
1973 else if (POINTER_TYPE_P (type))
1974 se.expr = gfc_build_addr_expr (gfc_get_pchar_type (c->expr->ts.kind),
1975 se.expr);
1976 CONSTRUCTOR_APPEND_ELT (v, build_int_cst (gfc_array_index_type, nelem),
1977 se.expr);
1978 c = gfc_constructor_next (c);
1979 nelem++;
1980 }
1981
1982 /* Next determine the tree type for the array. We use the gfortran
1983 front-end's gfc_get_nodesc_array_type in order to create a suitable
1984 GFC_ARRAY_TYPE_P that may be used by the scalarizer. */
1985
1986 memset (&as, 0, sizeof (gfc_array_spec));
1987
1988 as.rank = expr->rank;
1989 as.type = AS_EXPLICIT;
1990 if (!expr->shape)
1991 {
1992 as.lower[0] = gfc_get_int_expr (gfc_default_integer_kind, NULL, 0);
1993 as.upper[0] = gfc_get_int_expr (gfc_default_integer_kind,
1994 NULL, nelem - 1);
1995 }
1996 else
1997 for (i = 0; i < expr->rank; i++)
1998 {
1999 int tmp = (int) mpz_get_si (expr->shape[i]);
2000 as.lower[i] = gfc_get_int_expr (gfc_default_integer_kind, NULL, 0);
2001 as.upper[i] = gfc_get_int_expr (gfc_default_integer_kind,
2002 NULL, tmp - 1);
2003 }
2004
2005 tmptype = gfc_get_nodesc_array_type (type, &as, PACKED_STATIC, true);
2006
2007 /* The array spec 'as' is not needed anymore. */
2008 for (i = 0; i < as.rank + as.corank; i++)
2009 {
2010 gfc_free_expr (as.lower[i]);
2011 gfc_free_expr (as.upper[i]);
2012 }
2013
2014 init = build_constructor (tmptype, v);
2015
2016 TREE_CONSTANT (init) = 1;
2017 TREE_STATIC (init) = 1;
2018
2019 tmp = gfc_create_var (tmptype, "A");
2020 TREE_STATIC (tmp) = 1;
2021 TREE_CONSTANT (tmp) = 1;
2022 TREE_READONLY (tmp) = 1;
2023 DECL_INITIAL (tmp) = init;
2024
2025 return tmp;
2026 }
2027
2028
2029 /* Translate a constant EXPR_ARRAY array constructor for the scalarizer.
2030 This mostly initializes the scalarizer state info structure with the
2031 appropriate values to directly use the array created by the function
2032 gfc_build_constant_array_constructor. */
2033
2034 static void
2035 trans_constant_array_constructor (gfc_ss * ss, tree type)
2036 {
2037 gfc_array_info *info;
2038 tree tmp;
2039 int i;
2040
2041 tmp = gfc_build_constant_array_constructor (ss->info->expr, type);
2042
2043 info = &ss->info->data.array;
2044
2045 info->descriptor = tmp;
2046 info->data = gfc_build_addr_expr (NULL_TREE, tmp);
2047 info->offset = gfc_index_zero_node;
2048
2049 for (i = 0; i < ss->dimen; i++)
2050 {
2051 info->delta[i] = gfc_index_zero_node;
2052 info->start[i] = gfc_index_zero_node;
2053 info->end[i] = gfc_index_zero_node;
2054 info->stride[i] = gfc_index_one_node;
2055 }
2056 }
2057
2058
2059 static int
2060 get_rank (gfc_loopinfo *loop)
2061 {
2062 int rank;
2063
2064 rank = 0;
2065 for (; loop; loop = loop->parent)
2066 rank += loop->dimen;
2067
2068 return rank;
2069 }
2070
2071
2072 /* Helper routine of gfc_trans_array_constructor to determine if the
2073 bounds of the loop specified by LOOP are constant and simple enough
2074 to use with trans_constant_array_constructor. Returns the
2075 iteration count of the loop if suitable, and NULL_TREE otherwise. */
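/* For example, a two-dimensional loop with bounds 0..2 and 0..1 gives an
   iteration count of (2 + 1) * (1 + 1) = 6.  */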
2076
2077 static tree
2078 constant_array_constructor_loop_size (gfc_loopinfo * l)
2079 {
2080 gfc_loopinfo *loop;
2081 tree size = gfc_index_one_node;
2082 tree tmp;
2083 int i, total_dim;
2084
2085 total_dim = get_rank (l);
2086
2087 for (loop = l; loop; loop = loop->parent)
2088 {
2089 for (i = 0; i < loop->dimen; i++)
2090 {
2091 /* If the bounds aren't constant, return NULL_TREE. */
2092 if (!INTEGER_CST_P (loop->from[i]) || !INTEGER_CST_P (loop->to[i]))
2093 return NULL_TREE;
2094 if (!integer_zerop (loop->from[i]))
2095 {
2096 /* Only allow nonzero "from" in one-dimensional arrays. */
2097 if (total_dim != 1)
2098 return NULL_TREE;
2099 tmp = fold_build2_loc (input_location, MINUS_EXPR,
2100 gfc_array_index_type,
2101 loop->to[i], loop->from[i]);
2102 }
2103 else
2104 tmp = loop->to[i];
2105 tmp = fold_build2_loc (input_location, PLUS_EXPR,
2106 gfc_array_index_type, tmp, gfc_index_one_node);
2107 size = fold_build2_loc (input_location, MULT_EXPR,
2108 gfc_array_index_type, size, tmp);
2109 }
2110 }
2111
2112 return size;
2113 }
2114
2115
2116 static tree *
2117 get_loop_upper_bound_for_array (gfc_ss *array, int array_dim)
2118 {
2119 gfc_ss *ss;
2120 int n;
2121
2122 gcc_assert (array->nested_ss == NULL);
2123
2124 for (ss = array; ss; ss = ss->parent)
2125 for (n = 0; n < ss->loop->dimen; n++)
2126 if (array_dim == get_array_ref_dim_for_loop_dim (ss, n))
2127 return &(ss->loop->to[n]);
2128
2129 gcc_unreachable ();
2130 }
2131
2132
2133 static gfc_loopinfo *
2134 outermost_loop (gfc_loopinfo * loop)
2135 {
2136 while (loop->parent != NULL)
2137 loop = loop->parent;
2138
2139 return loop;
2140 }
2141
2142
2143 /* Array constructors are handled by constructing a temporary, then using that
2144 within the scalarization loop. This is not optimal, but seems by far the
2145 simplest method. */
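/* For instance, a constructor such as (/ (i*i, i=1,n) /) has a size known
   only at run time, so the temporary is grown dynamically, whereas a fully
   constant constructor is emitted as a static initializer by the special
   case further below.  */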
2146
2147 static void
2148 trans_array_constructor (gfc_ss * ss, locus * where)
2149 {
2150 gfc_constructor_base c;
2151 tree offset;
2152 tree offsetvar;
2153 tree desc;
2154 tree type;
2155 tree tmp;
2156 tree *loop_ubound0;
2157 bool dynamic;
2158 bool old_first_len, old_typespec_chararray_ctor;
2159 tree old_first_len_val;
2160 gfc_loopinfo *loop, *outer_loop;
2161 gfc_ss_info *ss_info;
2162 gfc_expr *expr;
2163 gfc_ss *s;
2164
2165 /* Save the old values for nested checking. */
2166 old_first_len = first_len;
2167 old_first_len_val = first_len_val;
2168 old_typespec_chararray_ctor = typespec_chararray_ctor;
2169
2170 loop = ss->loop;
2171 outer_loop = outermost_loop (loop);
2172 ss_info = ss->info;
2173 expr = ss_info->expr;
2174
2175 /* Do bounds-checking here and in gfc_trans_array_ctor_element only if no
2176 typespec was given for the array constructor. */
2177 typespec_chararray_ctor = (expr->ts.u.cl
2178 && expr->ts.u.cl->length_from_typespec);
2179
2180 if ((gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
2181 && expr->ts.type == BT_CHARACTER && !typespec_chararray_ctor)
2182 {
2183 first_len_val = gfc_create_var (gfc_charlen_type_node, "len");
2184 first_len = true;
2185 }
2186
2187 gcc_assert (ss->dimen == ss->loop->dimen);
2188
2189 c = expr->value.constructor;
2190 if (expr->ts.type == BT_CHARACTER)
2191 {
2192 bool const_string;
2193
2194 /* get_array_ctor_strlen walks the elements of the constructor; if a
2195 typespec was given, we already know the string length and want the one
2196 specified there. */
2197 if (typespec_chararray_ctor && expr->ts.u.cl->length
2198 && expr->ts.u.cl->length->expr_type != EXPR_CONSTANT)
2199 {
2200 gfc_se length_se;
2201
2202 const_string = false;
2203 gfc_init_se (&length_se, NULL);
2204 gfc_conv_expr_type (&length_se, expr->ts.u.cl->length,
2205 gfc_charlen_type_node);
2206 ss_info->string_length = length_se.expr;
2207 gfc_add_block_to_block (&outer_loop->pre, &length_se.pre);
2208 gfc_add_block_to_block (&outer_loop->post, &length_se.post);
2209 }
2210 else
2211 const_string = get_array_ctor_strlen (&outer_loop->pre, c,
2212 &ss_info->string_length);
2213
2214 /* Complex character array constructors should have been taken care of
2215 and not end up here. */
2216 gcc_assert (ss_info->string_length);
2217
2218 expr->ts.u.cl->backend_decl = ss_info->string_length;
2219
2220 type = gfc_get_character_type_len (expr->ts.kind, ss_info->string_length);
2221 if (const_string)
2222 type = build_pointer_type (type);
2223 }
2224 else
2225 type = gfc_typenode_for_spec (&expr->ts);
2226
2227 /* See if the constructor determines the loop bounds. */
2228 dynamic = false;
2229
2230 loop_ubound0 = get_loop_upper_bound_for_array (ss, 0);
2231
2232 if (expr->shape && get_rank (loop) > 1 && *loop_ubound0 == NULL_TREE)
2233 {
2234 /* We have a multidimensional parameter. */
2235 for (s = ss; s; s = s->parent)
2236 {
2237 int n;
2238 for (n = 0; n < s->loop->dimen; n++)
2239 {
2240 s->loop->from[n] = gfc_index_zero_node;
2241 s->loop->to[n] = gfc_conv_mpz_to_tree (expr->shape[s->dim[n]],
2242 gfc_index_integer_kind);
2243 s->loop->to[n] = fold_build2_loc (input_location, MINUS_EXPR,
2244 gfc_array_index_type,
2245 s->loop->to[n],
2246 gfc_index_one_node);
2247 }
2248 }
2249 }
2250
2251 if (*loop_ubound0 == NULL_TREE)
2252 {
2253 mpz_t size;
2254
2255 /* We should have a 1-dimensional, zero-based loop. */
2256 gcc_assert (loop->parent == NULL && loop->nested == NULL);
2257 gcc_assert (loop->dimen == 1);
2258 gcc_assert (integer_zerop (loop->from[0]));
2259
2260 /* Split the constructor size into a static part and a dynamic part.
2261 Allocate the static size up-front and record whether the dynamic
2262 size might be nonzero. */
2263 mpz_init (size);
2264 dynamic = gfc_get_array_constructor_size (&size, c);
2265 mpz_sub_ui (size, size, 1);
2266 loop->to[0] = gfc_conv_mpz_to_tree (size, gfc_index_integer_kind);
2267 mpz_clear (size);
2268 }
2269
2270 /* Special case constant array constructors. */
2271 if (!dynamic)
2272 {
2273 unsigned HOST_WIDE_INT nelem = gfc_constant_array_constructor_p (c);
2274 if (nelem > 0)
2275 {
2276 tree size = constant_array_constructor_loop_size (loop);
2277 if (size && compare_tree_int (size, nelem) == 0)
2278 {
2279 trans_constant_array_constructor (ss, type);
2280 goto finish;
2281 }
2282 }
2283 }
2284
2285 if (TREE_CODE (*loop_ubound0) == VAR_DECL)
2286 dynamic = true;
2287
2288 gfc_trans_create_temp_array (&outer_loop->pre, &outer_loop->post, ss, type,
2289 NULL_TREE, dynamic, true, false, where);
2290
2291 desc = ss_info->data.array.descriptor;
2292 offset = gfc_index_zero_node;
2293 offsetvar = gfc_create_var_np (gfc_array_index_type, "offset");
2294 TREE_NO_WARNING (offsetvar) = 1;
2295 TREE_USED (offsetvar) = 0;
2296 gfc_trans_array_constructor_value (&outer_loop->pre, type, desc, c,
2297 &offset, &offsetvar, dynamic);
2298
2299 /* If the array grows dynamically, the upper bound of the loop variable
2300 is determined by the array's final upper bound. */
2301 if (dynamic)
2302 {
2303 tmp = fold_build2_loc (input_location, MINUS_EXPR,
2304 gfc_array_index_type,
2305 offsetvar, gfc_index_one_node);
2306 tmp = gfc_evaluate_now (tmp, &outer_loop->pre);
2307 gfc_conv_descriptor_ubound_set (&loop->pre, desc, gfc_rank_cst[0], tmp);
2308 if (*loop_ubound0 && TREE_CODE (*loop_ubound0) == VAR_DECL)
2309 gfc_add_modify (&outer_loop->pre, *loop_ubound0, tmp);
2310 else
2311 *loop_ubound0 = tmp;
2312 }
2313
2314 if (TREE_USED (offsetvar))
2315 pushdecl (offsetvar);
2316 else
2317 gcc_assert (INTEGER_CST_P (offset));
2318
2319 #if 0
2320 /* Disable bound checking for now because it's probably broken. */
2321 if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
2322 {
2323 gcc_unreachable ();
2324 }
2325 #endif
2326
2327 finish:
2328 /* Restore old values of globals. */
2329 first_len = old_first_len;
2330 first_len_val = old_first_len_val;
2331 typespec_chararray_ctor = old_typespec_chararray_ctor;
2332 }
2333
2334
2335 /* INFO describes a GFC_SS_SECTION in loop LOOP, and this function is
2336 called after evaluating all of INFO's vector dimensions. Go through
2337 each such vector dimension and see if we can now fill in any missing
2338 loop bounds. */
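/* For a vector subscript such as a(v), the scalarization loop runs from 0
   to ubound(v) - lbound(v), i.e. over all elements of the index vector v.  */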
2339
2340 static void
2341 set_vector_loop_bounds (gfc_ss * ss)
2342 {
2343 gfc_loopinfo *loop, *outer_loop;
2344 gfc_array_info *info;
2345 gfc_se se;
2346 tree tmp;
2347 tree desc;
2348 tree zero;
2349 int n;
2350 int dim;
2351
2352 outer_loop = outermost_loop (ss->loop);
2353
2354 info = &ss->info->data.array;
2355
2356 for (; ss; ss = ss->parent)
2357 {
2358 loop = ss->loop;
2359
2360 for (n = 0; n < loop->dimen; n++)
2361 {
2362 dim = ss->dim[n];
2363 if (info->ref->u.ar.dimen_type[dim] != DIMEN_VECTOR
2364 || loop->to[n] != NULL)
2365 continue;
2366
2367 /* Loop variable N indexes vector dimension DIM, and we don't
2368 yet know the upper bound of loop variable N. Set it to the
2369 difference between the vector's upper and lower bounds. */
2370 gcc_assert (loop->from[n] == gfc_index_zero_node);
2371 gcc_assert (info->subscript[dim]
2372 && info->subscript[dim]->info->type == GFC_SS_VECTOR);
2373
2374 gfc_init_se (&se, NULL);
2375 desc = info->subscript[dim]->info->data.array.descriptor;
2376 zero = gfc_rank_cst[0];
2377 tmp = fold_build2_loc (input_location, MINUS_EXPR,
2378 gfc_array_index_type,
2379 gfc_conv_descriptor_ubound_get (desc, zero),
2380 gfc_conv_descriptor_lbound_get (desc, zero));
2381 tmp = gfc_evaluate_now (tmp, &outer_loop->pre);
2382 loop->to[n] = tmp;
2383 }
2384 }
2385 }
2386
2387
2388 /* Add the pre and post chains for all the scalar expressions in a SS chain
2389 to loop. This is called after the loop parameters have been calculated,
2390 but before the actual scalarizing loops. */
2391
2392 static void
2393 gfc_add_loop_ss_code (gfc_loopinfo * loop, gfc_ss * ss, bool subscript,
2394 locus * where)
2395 {
2396 gfc_loopinfo *nested_loop, *outer_loop;
2397 gfc_se se;
2398 gfc_ss_info *ss_info;
2399 gfc_array_info *info;
2400 gfc_expr *expr;
2401 bool skip_nested = false;
2402 int n;
2403
2404 outer_loop = outermost_loop (loop);
2405
2406 /* TODO: This can generate bad code if there are ordering dependencies,
2407 e.g., a callee allocated function and an unknown size constructor. */
2408 gcc_assert (ss != NULL);
2409
2410 for (; ss != gfc_ss_terminator; ss = ss->loop_chain)
2411 {
2412 gcc_assert (ss);
2413
2414 /* Cross loop arrays are handled from within the most nested loop. */
2415 if (ss->nested_ss != NULL)
2416 continue;
2417
2418 ss_info = ss->info;
2419 expr = ss_info->expr;
2420 info = &ss_info->data.array;
2421
2422 switch (ss_info->type)
2423 {
2424 case GFC_SS_SCALAR:
2425 /* Scalar expression. Evaluate this now. This includes elemental
2426 dimension indices, but not array section bounds. */
2427 gfc_init_se (&se, NULL);
2428 gfc_conv_expr (&se, expr);
2429 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2430
2431 if (expr->ts.type != BT_CHARACTER)
2432 {
2433 /* Move the evaluation of scalar expressions outside the
2434 scalarization loop, except for WHERE assignments. */
2435 if (subscript)
2436 se.expr = convert (gfc_array_index_type, se.expr);
2437 if (!ss_info->where)
2438 se.expr = gfc_evaluate_now (se.expr, &outer_loop->pre);
2439 gfc_add_block_to_block (&outer_loop->pre, &se.post);
2440 }
2441 else
2442 gfc_add_block_to_block (&outer_loop->post, &se.post);
2443
2444 ss_info->data.scalar.value = se.expr;
2445 ss_info->string_length = se.string_length;
2446 break;
2447
2448 case GFC_SS_REFERENCE:
2449 /* Scalar argument to elemental procedure. */
2450 gfc_init_se (&se, NULL);
2451 if (ss_info->can_be_null_ref)
2452 {
2453 /* If the actual argument can be absent (in other words, it can
2454 be a NULL reference), don't try to evaluate it; pass instead
2455 the reference directly. */
2456 gfc_conv_expr_reference (&se, expr);
2457 }
2458 else
2459 {
2460 /* Otherwise, evaluate the argument outside the loop and pass
2461 a reference to the value. */
2462 gfc_conv_expr (&se, expr);
2463 }
2464 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2465 gfc_add_block_to_block (&outer_loop->post, &se.post);
2466 if (gfc_is_class_scalar_expr (expr))
2467 /* This is necessary because the dynamic type will always be
2468 larger than the declared type. In consequence, assigning
2469 the value to a temporary could segfault.
2470 OOP-TODO: see if this is generally correct or if the value
2471 has to be written to an allocated temporary, whose address
2472 is passed via ss_info. */
2473 ss_info->data.scalar.value = se.expr;
2474 else
2475 ss_info->data.scalar.value = gfc_evaluate_now (se.expr,
2476 &outer_loop->pre);
2477
2478 ss_info->string_length = se.string_length;
2479 break;
2480
2481 case GFC_SS_SECTION:
2482 /* Add the expressions for scalar and vector subscripts. */
2483 for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
2484 if (info->subscript[n])
2485 {
2486 gfc_add_loop_ss_code (loop, info->subscript[n], true, where);
2487 /* The recursive call will have taken care of the nested loops.
2488 No need to do it twice. */
2489 skip_nested = true;
2490 }
2491
2492 set_vector_loop_bounds (ss);
2493 break;
2494
2495 case GFC_SS_VECTOR:
2496 /* Get the vector's descriptor and store it in SS. */
2497 gfc_init_se (&se, NULL);
2498 gfc_conv_expr_descriptor (&se, expr, gfc_walk_expr (expr));
2499 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2500 gfc_add_block_to_block (&outer_loop->post, &se.post);
2501 info->descriptor = se.expr;
2502 break;
2503
2504 case GFC_SS_INTRINSIC:
2505 gfc_add_intrinsic_ss_code (loop, ss);
2506 break;
2507
2508 case GFC_SS_FUNCTION:
2509 /* Array function return value. We call the function and save its
2510 result in a temporary for use inside the loop. */
2511 gfc_init_se (&se, NULL);
2512 se.loop = loop;
2513 se.ss = ss;
2514 gfc_conv_expr (&se, expr);
2515 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2516 gfc_add_block_to_block (&outer_loop->post, &se.post);
2517 ss_info->string_length = se.string_length;
2518 break;
2519
2520 case GFC_SS_CONSTRUCTOR:
2521 if (expr->ts.type == BT_CHARACTER
2522 && ss_info->string_length == NULL
2523 && expr->ts.u.cl
2524 && expr->ts.u.cl->length)
2525 {
2526 gfc_init_se (&se, NULL);
2527 gfc_conv_expr_type (&se, expr->ts.u.cl->length,
2528 gfc_charlen_type_node);
2529 ss_info->string_length = se.expr;
2530 gfc_add_block_to_block (&outer_loop->pre, &se.pre);
2531 gfc_add_block_to_block (&outer_loop->post, &se.post);
2532 }
2533 trans_array_constructor (ss, where);
2534 break;
2535
2536 case GFC_SS_TEMP:
2537 case GFC_SS_COMPONENT:
2538 /* Do nothing. These are handled elsewhere. */
2539 break;
2540
2541 default:
2542 gcc_unreachable ();
2543 }
2544 }
2545
2546 if (!skip_nested)
2547 for (nested_loop = loop->nested; nested_loop;
2548 nested_loop = nested_loop->next)
2549 gfc_add_loop_ss_code (nested_loop, nested_loop->ss, subscript, where);
2550 }
2551
2552
2553 /* Translate expressions for the descriptor and data pointer of a SS. */
2554 /*GCC ARRAYS*/
2555
2556 static void
2557 gfc_conv_ss_descriptor (stmtblock_t * block, gfc_ss * ss, int base)
2558 {
2559 gfc_se se;
2560 gfc_ss_info *ss_info;
2561 gfc_array_info *info;
2562 tree tmp;
2563
2564 ss_info = ss->info;
2565 info = &ss_info->data.array;
2566
2567 /* Get the descriptor for the array to be scalarized. */
2568 gcc_assert (ss_info->expr->expr_type == EXPR_VARIABLE);
2569 gfc_init_se (&se, NULL);
2570 se.descriptor_only = 1;
2571 gfc_conv_expr_lhs (&se, ss_info->expr);
2572 gfc_add_block_to_block (block, &se.pre);
2573 info->descriptor = se.expr;
2574 ss_info->string_length = se.string_length;
2575
2576 if (base)
2577 {
2578 /* Also the data pointer. */
2579 tmp = gfc_conv_array_data (se.expr);
2580 /* If this is a variable or address of a variable we use it directly.
2581 Otherwise we must evaluate it now to avoid breaking dependency
2582 analysis by pulling the expressions for elemental array indices
2583 inside the loop. */
2584 if (!(DECL_P (tmp)
2585 || (TREE_CODE (tmp) == ADDR_EXPR
2586 && DECL_P (TREE_OPERAND (tmp, 0)))))
2587 tmp = gfc_evaluate_now (tmp, block);
2588 info->data = tmp;
2589
2590 tmp = gfc_conv_array_offset (se.expr);
2591 info->offset = gfc_evaluate_now (tmp, block);
2592
2593 /* Make absolutely sure that the saved_offset is indeed saved
2594 so that the variable is still accessible after the loops
2595 are translated. */
2596 info->saved_offset = info->offset;
2597 }
2598 }
2599
2600
2601 /* Initialize a gfc_loopinfo structure. */
2602
2603 void
2604 gfc_init_loopinfo (gfc_loopinfo * loop)
2605 {
2606 int n;
2607
2608 memset (loop, 0, sizeof (gfc_loopinfo));
2609 gfc_init_block (&loop->pre);
2610 gfc_init_block (&loop->post);
2611
2612 /* Initially scalarize in order and default to no loop reversal. */
2613 for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
2614 {
2615 loop->order[n] = n;
2616 loop->reverse[n] = GFC_INHIBIT_REVERSE;
2617 }
2618
2619 loop->ss = gfc_ss_terminator;
2620 }
2621
2622
2623 /* Copies the loop variable info to a gfc_se structure. Does not copy the SS
2624 chain. */
2625
2626 void
2627 gfc_copy_loopinfo_to_se (gfc_se * se, gfc_loopinfo * loop)
2628 {
2629 se->loop = loop;
2630 }
2631
2632
2633 /* Return an expression for the data pointer of an array. */
2634
2635 tree
2636 gfc_conv_array_data (tree descriptor)
2637 {
2638 tree type;
2639
2640 type = TREE_TYPE (descriptor);
2641 if (GFC_ARRAY_TYPE_P (type))
2642 {
2643 if (TREE_CODE (type) == POINTER_TYPE)
2644 return descriptor;
2645 else
2646 {
2647 /* Descriptorless arrays. */
2648 return gfc_build_addr_expr (NULL_TREE, descriptor);
2649 }
2650 }
2651 else
2652 return gfc_conv_descriptor_data_get (descriptor);
2653 }
2654
2655
2656 /* Return an expression for the base offset of an array. */
2657
2658 tree
2659 gfc_conv_array_offset (tree descriptor)
2660 {
2661 tree type;
2662
2663 type = TREE_TYPE (descriptor);
2664 if (GFC_ARRAY_TYPE_P (type))
2665 return GFC_TYPE_ARRAY_OFFSET (type);
2666 else
2667 return gfc_conv_descriptor_offset_get (descriptor);
2668 }
2669
2670
2671 /* Get an expression for the array stride. */
2672
2673 tree
2674 gfc_conv_array_stride (tree descriptor, int dim)
2675 {
2676 tree tmp;
2677 tree type;
2678
2679 type = TREE_TYPE (descriptor);
2680
2681 /* For descriptorless arrays use the array size. */
2682 tmp = GFC_TYPE_ARRAY_STRIDE (type, dim);
2683 if (tmp != NULL_TREE)
2684 return tmp;
2685
2686 tmp = gfc_conv_descriptor_stride_get (descriptor, gfc_rank_cst[dim]);
2687 return tmp;
2688 }
2689
2690
2691 /* Like gfc_conv_array_stride, but for the lower bound. */
2692
2693 tree
2694 gfc_conv_array_lbound (tree descriptor, int dim)
2695 {
2696 tree tmp;
2697 tree type;
2698
2699 type = TREE_TYPE (descriptor);
2700
2701 tmp = GFC_TYPE_ARRAY_LBOUND (type, dim);
2702 if (tmp != NULL_TREE)
2703 return tmp;
2704
2705 tmp = gfc_conv_descriptor_lbound_get (descriptor, gfc_rank_cst[dim]);
2706 return tmp;
2707 }
2708
2709
2710 /* Like gfc_conv_array_stride, but for the upper bound. */
2711
2712 tree
2713 gfc_conv_array_ubound (tree descriptor, int dim)
2714 {
2715 tree tmp;
2716 tree type;
2717
2718 type = TREE_TYPE (descriptor);
2719
2720 tmp = GFC_TYPE_ARRAY_UBOUND (type, dim);
2721 if (tmp != NULL_TREE)
2722 return tmp;
2723
2724 /* This should only ever happen when passing an assumed shape array
2725 as an actual parameter. The value will never be used. */
2726 if (GFC_ARRAY_TYPE_P (TREE_TYPE (descriptor)))
2727 return gfc_index_zero_node;
2728
2729 tmp = gfc_conv_descriptor_ubound_get (descriptor, gfc_rank_cst[dim]);
2730 return tmp;
2731 }
2732
2733
2734 /* Generate code to perform an array index bound check. */
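/* Conceptually this emits, for index I in dimension N,
   if (I < lbound || (check_upper && I > ubound)) runtime_error (...);
   and returns the evaluated index unchanged.  */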
2735
2736 static tree
2737 trans_array_bound_check (gfc_se * se, gfc_ss *ss, tree index, int n,
2738 locus * where, bool check_upper)
2739 {
2740 tree fault;
2741 tree tmp_lo, tmp_up;
2742 tree descriptor;
2743 char *msg;
2744 const char * name = NULL;
2745
2746 if (!(gfc_option.rtcheck & GFC_RTCHECK_BOUNDS))
2747 return index;
2748
2749 descriptor = ss->info->data.array.descriptor;
2750
2751 index = gfc_evaluate_now (index, &se->pre);
2752
2753 /* We find a name for the error message. */
2754 name = ss->info->expr->symtree->n.sym->name;
2755 gcc_assert (name != NULL);
2756
2757 if (TREE_CODE (descriptor) == VAR_DECL)
2758 name = IDENTIFIER_POINTER (DECL_NAME (descriptor));
2759
2760 /* If upper bound is present, include both bounds in the error message. */
2761 if (check_upper)
2762 {
2763 tmp_lo = gfc_conv_array_lbound (descriptor, n);
2764 tmp_up = gfc_conv_array_ubound (descriptor, n);
2765
2766 if (name)
2767 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
2768 "outside of expected range (%%ld:%%ld)", n+1, name);
2769 else
2770 asprintf (&msg, "Index '%%ld' of dimension %d "
2771 "outside of expected range (%%ld:%%ld)", n+1);
2772
2773 fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
2774 index, tmp_lo);
2775 gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
2776 fold_convert (long_integer_type_node, index),
2777 fold_convert (long_integer_type_node, tmp_lo),
2778 fold_convert (long_integer_type_node, tmp_up));
2779 fault = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
2780 index, tmp_up);
2781 gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
2782 fold_convert (long_integer_type_node, index),
2783 fold_convert (long_integer_type_node, tmp_lo),
2784 fold_convert (long_integer_type_node, tmp_up));
2785 free (msg);
2786 }
2787 else
2788 {
2789 tmp_lo = gfc_conv_array_lbound (descriptor, n);
2790
2791 if (name)
2792 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
2793 "below lower bound of %%ld", n+1, name);
2794 else
2795 asprintf (&msg, "Index '%%ld' of dimension %d "
2796 "below lower bound of %%ld", n+1);
2797
2798 fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
2799 index, tmp_lo);
2800 gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
2801 fold_convert (long_integer_type_node, index),
2802 fold_convert (long_integer_type_node, tmp_lo));
2803 free (msg);
2804 }
2805
2806 return index;
2807 }
2808
2809
2810 /* Return the offset for an index. Performs bound checking for elemental
2811 dimensions. Single element references are processed separately.
2812 DIM is the array dimension, I is the loop dimension. */
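/* For a scalarized (DIMEN_RANGE) dimension the result is essentially
   (loopvar * section_stride + delta) * array_stride, while elemental and
   vector dimensions use the precomputed scalar or vector subscript in
   place of the loop variable.  */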
2813
2814 static tree
2815 conv_array_index_offset (gfc_se * se, gfc_ss * ss, int dim, int i,
2816 gfc_array_ref * ar, tree stride)
2817 {
2818 gfc_array_info *info;
2819 tree index;
2820 tree desc;
2821 tree data;
2822
2823 info = &ss->info->data.array;
2824
2825 /* Get the index into the array for this dimension. */
2826 if (ar)
2827 {
2828 gcc_assert (ar->type != AR_ELEMENT);
2829 switch (ar->dimen_type[dim])
2830 {
2831 case DIMEN_THIS_IMAGE:
2832 gcc_unreachable ();
2833 break;
2834 case DIMEN_ELEMENT:
2835 /* Elemental dimension. */
2836 gcc_assert (info->subscript[dim]
2837 && info->subscript[dim]->info->type == GFC_SS_SCALAR);
2838 /* We've already translated this value outside the loop. */
2839 index = info->subscript[dim]->info->data.scalar.value;
2840
2841 index = trans_array_bound_check (se, ss, index, dim, &ar->where,
2842 ar->as->type != AS_ASSUMED_SIZE
2843 || dim < ar->dimen - 1);
2844 break;
2845
2846 case DIMEN_VECTOR:
2847 gcc_assert (info && se->loop);
2848 gcc_assert (info->subscript[dim]
2849 && info->subscript[dim]->info->type == GFC_SS_VECTOR);
2850 desc = info->subscript[dim]->info->data.array.descriptor;
2851
2852 /* Get a zero-based index into the vector. */
2853 index = fold_build2_loc (input_location, MINUS_EXPR,
2854 gfc_array_index_type,
2855 se->loop->loopvar[i], se->loop->from[i]);
2856
2857 /* Multiply the index by the stride. */
2858 index = fold_build2_loc (input_location, MULT_EXPR,
2859 gfc_array_index_type,
2860 index, gfc_conv_array_stride (desc, 0));
2861
2862 /* Read the vector to get an index into info->descriptor. */
2863 data = build_fold_indirect_ref_loc (input_location,
2864 gfc_conv_array_data (desc));
2865 index = gfc_build_array_ref (data, index, NULL);
2866 index = gfc_evaluate_now (index, &se->pre);
2867 index = fold_convert (gfc_array_index_type, index);
2868
2869 /* Do any bounds checking on the final info->descriptor index. */
2870 index = trans_array_bound_check (se, ss, index, dim, &ar->where,
2871 ar->as->type != AS_ASSUMED_SIZE
2872 || dim < ar->dimen - 1);
2873 break;
2874
2875 case DIMEN_RANGE:
2876 /* Scalarized dimension. */
2877 gcc_assert (info && se->loop);
2878
2879 /* Multiply the loop variable by the stride and delta. */
2880 index = se->loop->loopvar[i];
2881 if (!integer_onep (info->stride[dim]))
2882 index = fold_build2_loc (input_location, MULT_EXPR,
2883 gfc_array_index_type, index,
2884 info->stride[dim]);
2885 if (!integer_zerop (info->delta[dim]))
2886 index = fold_build2_loc (input_location, PLUS_EXPR,
2887 gfc_array_index_type, index,
2888 info->delta[dim]);
2889 break;
2890
2891 default:
2892 gcc_unreachable ();
2893 }
2894 }
2895 else
2896 {
2897 /* Temporary array or derived type component. */
2898 gcc_assert (se->loop);
2899 index = se->loop->loopvar[se->loop->order[i]];
2900
2901 /* Pointer functions can have stride[0] different from unity.
2902 Use the stride returned by the function call and stored in
2903 the descriptor for the temporary. */
2904 if (se->ss && se->ss->info->type == GFC_SS_FUNCTION
2905 && se->ss->info->expr
2906 && se->ss->info->expr->symtree
2907 && se->ss->info->expr->symtree->n.sym->result
2908 && se->ss->info->expr->symtree->n.sym->result->attr.pointer)
2909 stride = gfc_conv_descriptor_stride_get (info->descriptor,
2910 gfc_rank_cst[dim]);
2911
2912 if (!integer_zerop (info->delta[dim]))
2913 index = fold_build2_loc (input_location, PLUS_EXPR,
2914 gfc_array_index_type, index, info->delta[dim]);
2915 }
2916
2917 /* Multiply by the stride. */
2918 if (!integer_onep (stride))
2919 index = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
2920 index, stride);
2921
2922 return index;
2923 }
2924
2925
2926 /* Build a scalarized array reference using the vptr 'size'. */
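/* Because the dynamic type of a CLASS entity may be larger than its
   declared type, the element address is computed from the element size
   stored in the vtable, roughly base + index * size, rather than from the
   static element type.  */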
2927
2928 static bool
2929 build_class_array_ref (gfc_se *se, tree base, tree index)
2930 {
2931 tree type;
2932 tree size;
2933 tree offset;
2934 tree decl;
2935 tree tmp;
2936 gfc_expr *expr = se->ss->info->expr;
2937 gfc_ref *ref;
2938 gfc_ref *class_ref;
2939 gfc_typespec *ts;
2940
2941 if (expr == NULL || expr->ts.type != BT_CLASS)
2942 return false;
2943
2944 if (expr->symtree && expr->symtree->n.sym->ts.type == BT_CLASS)
2945 ts = &expr->symtree->n.sym->ts;
2946 else
2947 ts = NULL;
2948 class_ref = NULL;
2949
2950 for (ref = expr->ref; ref; ref = ref->next)
2951 {
2952 if (ref->type == REF_COMPONENT
2953 && ref->u.c.component->ts.type == BT_CLASS
2954 && ref->next && ref->next->type == REF_COMPONENT
2955 && strcmp (ref->next->u.c.component->name, "_data") == 0
2956 && ref->next->next
2957 && ref->next->next->type == REF_ARRAY
2958 && ref->next->next->u.ar.type != AR_ELEMENT)
2959 {
2960 ts = &ref->u.c.component->ts;
2961 class_ref = ref;
2962 break;
2963 }
2964 }
2965
2966 if (ts == NULL)
2967 return false;
2968
2969 if (class_ref == NULL)
2970 decl = expr->symtree->n.sym->backend_decl;
2971 else
2972 {
2973 /* Remove everything after the last class reference, convert the
2974 expression and then recover its tail end once more. */
2975 gfc_se tmpse;
2976 ref = class_ref->next;
2977 class_ref->next = NULL;
2978 gfc_init_se (&tmpse, NULL);
2979 gfc_conv_expr (&tmpse, expr);
2980 decl = tmpse.expr;
2981 class_ref->next = ref;
2982 }
2983
2984 size = gfc_vtable_size_get (decl);
2985
2986 /* Build the address of the element. */
2987 type = TREE_TYPE (TREE_TYPE (base));
2988 size = fold_convert (TREE_TYPE (index), size);
2989 offset = fold_build2_loc (input_location, MULT_EXPR,
2990 gfc_array_index_type,
2991 index, size);
2992 tmp = gfc_build_addr_expr (pvoid_type_node, base);
2993 tmp = fold_build_pointer_plus_loc (input_location, tmp, offset);
2994 tmp = fold_convert (build_pointer_type (type), tmp);
2995
2996 /* Return the element in the se expression. */
2997 se->expr = build_fold_indirect_ref_loc (input_location, tmp);
2998 return true;
2999 }
3000
3001
3002 /* Build a scalarized reference to an array. */
3003
3004 static void
3005 gfc_conv_scalarized_array_ref (gfc_se * se, gfc_array_ref * ar)
3006 {
3007 gfc_array_info *info;
3008 tree decl = NULL_TREE;
3009 tree index;
3010 tree tmp;
3011 gfc_ss *ss;
3012 gfc_expr *expr;
3013 int n;
3014
3015 ss = se->ss;
3016 expr = ss->info->expr;
3017 info = &ss->info->data.array;
3018 if (ar)
3019 n = se->loop->order[0];
3020 else
3021 n = 0;
3022
3023 index = conv_array_index_offset (se, ss, ss->dim[n], n, ar, info->stride0);
3024 /* Add the offset for this dimension to the stored offset for all other
3025 dimensions. */
3026 if (!integer_zerop (info->offset))
3027 index = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
3028 index, info->offset);
3029
3030 if (expr && is_subref_array (expr))
3031 decl = expr->symtree->n.sym->backend_decl;
3032
3033 tmp = build_fold_indirect_ref_loc (input_location, info->data);
3034
3035 /* Use the vptr 'size' field to access the element of a class
3036 array. */
3037 if (build_class_array_ref (se, tmp, index))
3038 return;
3039
3040 se->expr = gfc_build_array_ref (tmp, index, decl);
3041 }
3042
3043
3044 /* Translate access of temporary array. */
3045
3046 void
3047 gfc_conv_tmp_array_ref (gfc_se * se)
3048 {
3049 se->string_length = se->ss->info->string_length;
3050 gfc_conv_scalarized_array_ref (se, NULL);
3051 gfc_advance_se_ss_chain (se);
3052 }
3053
3054 /* Add T to the offset pair *OFFSET, *CST_OFFSET. */
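/* Compile-time constants are accumulated separately in *CST_OFFSET so that
   they fold into a single INTEGER_CST, while everything else is summed into
   *OFFSET; the two are combined once all dimensions have been processed.  */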
3055
3056 static void
3057 add_to_offset (tree *cst_offset, tree *offset, tree t)
3058 {
3059 if (TREE_CODE (t) == INTEGER_CST)
3060 *cst_offset = int_const_binop (PLUS_EXPR, *cst_offset, t);
3061 else
3062 {
3063 if (!integer_zerop (*offset))
3064 *offset = fold_build2_loc (input_location, PLUS_EXPR,
3065 gfc_array_index_type, *offset, t);
3066 else
3067 *offset = t;
3068 }
3069 }
3070
3071 /* Build an array reference. se->expr already holds the array descriptor.
3072 This should be either a variable, indirect variable reference or component
3073 reference. For arrays which do not have a descriptor, se->expr will be
3074 the data pointer.
3075 a(i, j, k) = base[offset + i * stride[0] + j * stride[1] + k * stride[2]] */
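/* As a rough illustration, for a contiguous array declared as a(4,5) and
   stored in column-major order, stride[0] is 1 and stride[1] is 4, and the
   base offset compensates for the lower bounds, so that a(i, j) maps to
   base[offset + i * 1 + j * 4].  */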
3076
3077 void
3078 gfc_conv_array_ref (gfc_se * se, gfc_array_ref * ar, gfc_symbol * sym,
3079 locus * where)
3080 {
3081 int n;
3082 tree offset, cst_offset;
3083 tree tmp;
3084 tree stride;
3085 gfc_se indexse;
3086 gfc_se tmpse;
3087
3088 if (ar->dimen == 0)
3089 {
3090 gcc_assert (ar->codimen);
3091
3092 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (se->expr)))
3093 se->expr = build_fold_indirect_ref (gfc_conv_array_data (se->expr));
3094 else
3095 {
3096 if (GFC_ARRAY_TYPE_P (TREE_TYPE (se->expr))
3097 && TREE_CODE (TREE_TYPE (se->expr)) == POINTER_TYPE)
3098 se->expr = build_fold_indirect_ref_loc (input_location, se->expr);
3099
3100 /* Use the actual tree type and not the wrapped coarray. */
3101 if (!se->want_pointer)
3102 se->expr = fold_convert (TYPE_MAIN_VARIANT (TREE_TYPE (se->expr)),
3103 se->expr);
3104 }
3105
3106 return;
3107 }
3108
3109 /* Handle scalarized references separately. */
3110 if (ar->type != AR_ELEMENT)
3111 {
3112 gfc_conv_scalarized_array_ref (se, ar);
3113 gfc_advance_se_ss_chain (se);
3114 return;
3115 }
3116
3117 cst_offset = offset = gfc_index_zero_node;
3118 add_to_offset (&cst_offset, &offset, gfc_conv_array_offset (se->expr));
3119
3120 /* Calculate the offsets from all the dimensions. Make sure to associate
3121 the final offset so that we form a chain of loop invariant summands. */
3122 for (n = ar->dimen - 1; n >= 0; n--)
3123 {
3124 /* Calculate the index for this dimension. */
3125 gfc_init_se (&indexse, se);
3126 gfc_conv_expr_type (&indexse, ar->start[n], gfc_array_index_type);
3127 gfc_add_block_to_block (&se->pre, &indexse.pre);
3128
3129 if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
3130 {
3131 /* Check array bounds. */
3132 tree cond;
3133 char *msg;
3134
3135 /* Evaluate the indexse.expr only once. */
3136 indexse.expr = save_expr (indexse.expr);
3137
3138 /* Lower bound. */
3139 tmp = gfc_conv_array_lbound (se->expr, n);
3140 if (sym->attr.temporary)
3141 {
3142 gfc_init_se (&tmpse, se);
3143 gfc_conv_expr_type (&tmpse, ar->as->lower[n],
3144 gfc_array_index_type);
3145 gfc_add_block_to_block (&se->pre, &tmpse.pre);
3146 tmp = tmpse.expr;
3147 }
3148
3149 cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
3150 indexse.expr, tmp);
3151 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3152 "below lower bound of %%ld", n+1, sym->name);
3153 gfc_trans_runtime_check (true, false, cond, &se->pre, where, msg,
3154 fold_convert (long_integer_type_node,
3155 indexse.expr),
3156 fold_convert (long_integer_type_node, tmp));
3157 free (msg);
3158
3159 /* Upper bound, but not for the last dimension of assumed-size
3160 arrays. */
3161 if (n < ar->dimen - 1 || ar->as->type != AS_ASSUMED_SIZE)
3162 {
3163 tmp = gfc_conv_array_ubound (se->expr, n);
3164 if (sym->attr.temporary)
3165 {
3166 gfc_init_se (&tmpse, se);
3167 gfc_conv_expr_type (&tmpse, ar->as->upper[n],
3168 gfc_array_index_type);
3169 gfc_add_block_to_block (&se->pre, &tmpse.pre);
3170 tmp = tmpse.expr;
3171 }
3172
3173 cond = fold_build2_loc (input_location, GT_EXPR,
3174 boolean_type_node, indexse.expr, tmp);
3175 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3176 "above upper bound of %%ld", n+1, sym->name);
3177 gfc_trans_runtime_check (true, false, cond, &se->pre, where, msg,
3178 fold_convert (long_integer_type_node,
3179 indexse.expr),
3180 fold_convert (long_integer_type_node, tmp));
3181 free (msg);
3182 }
3183 }
3184
3185 /* Multiply the index by the stride. */
3186 stride = gfc_conv_array_stride (se->expr, n);
3187 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
3188 indexse.expr, stride);
3189
3190 /* And add it to the total. */
3191 add_to_offset (&cst_offset, &offset, tmp);
3192 }
3193
3194 if (!integer_zerop (cst_offset))
3195 offset = fold_build2_loc (input_location, PLUS_EXPR,
3196 gfc_array_index_type, offset, cst_offset);
3197
3198 /* Access the calculated element. */
3199 tmp = gfc_conv_array_data (se->expr);
3200 tmp = build_fold_indirect_ref (tmp);
3201 se->expr = gfc_build_array_ref (tmp, offset, sym->backend_decl);
3202 }
3203
3204
3205 /* Add the offset corresponding to array's ARRAY_DIM dimension and loop's
3206 LOOP_DIM dimension (if any) to array's offset. */
3207
3208 static void
3209 add_array_offset (stmtblock_t *pblock, gfc_loopinfo *loop, gfc_ss *ss,
3210 gfc_array_ref *ar, int array_dim, int loop_dim)
3211 {
3212 gfc_se se;
3213 gfc_array_info *info;
3214 tree stride, index;
3215
3216 info = &ss->info->data.array;
3217
3218 gfc_init_se (&se, NULL);
3219 se.loop = loop;
3220 se.expr = info->descriptor;
3221 stride = gfc_conv_array_stride (info->descriptor, array_dim);
3222 index = conv_array_index_offset (&se, ss, array_dim, loop_dim, ar, stride);
3223 gfc_add_block_to_block (pblock, &se.pre);
3224
3225 info->offset = fold_build2_loc (input_location, PLUS_EXPR,
3226 gfc_array_index_type,
3227 info->offset, index);
3228 info->offset = gfc_evaluate_now (info->offset, pblock);
3229 }
3230
3231
3232 /* Generate the code to be executed immediately before entering a
3233 scalarization loop. */
3234
3235 static void
3236 gfc_trans_preloop_setup (gfc_loopinfo * loop, int dim, int flag,
3237 stmtblock_t * pblock)
3238 {
3239 tree stride;
3240 gfc_ss_info *ss_info;
3241 gfc_array_info *info;
3242 gfc_ss_type ss_type;
3243 gfc_ss *ss, *pss;
3244 gfc_loopinfo *ploop;
3245 gfc_array_ref *ar;
3246 int i;
3247
3248 /* This code will be executed before entering the scalarization loop
3249 for this dimension. */
3250 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3251 {
3252 ss_info = ss->info;
3253
3254 if ((ss_info->useflags & flag) == 0)
3255 continue;
3256
3257 ss_type = ss_info->type;
3258 if (ss_type != GFC_SS_SECTION
3259 && ss_type != GFC_SS_FUNCTION
3260 && ss_type != GFC_SS_CONSTRUCTOR
3261 && ss_type != GFC_SS_COMPONENT)
3262 continue;
3263
3264 info = &ss_info->data.array;
3265
3266 gcc_assert (dim < ss->dimen);
3267 gcc_assert (ss->dimen == loop->dimen);
3268
3269 if (info->ref)
3270 ar = &info->ref->u.ar;
3271 else
3272 ar = NULL;
3273
3274 if (dim == loop->dimen - 1 && loop->parent != NULL)
3275 {
3276 /* If we are in the outermost dimension of this loop, the previous
3277 dimension shall be in the parent loop. */
3278 gcc_assert (ss->parent != NULL);
3279
3280 pss = ss->parent;
3281 ploop = loop->parent;
3282
3283 /* ss and ss->parent are about the same array. */
3284 gcc_assert (ss_info == pss->info);
3285 }
3286 else
3287 {
3288 ploop = loop;
3289 pss = ss;
3290 }
3291
3292 if (dim == loop->dimen - 1)
3293 i = 0;
3294 else
3295 i = dim + 1;
3296
3297 /* For the time being, there is no loop reordering. */
3298 gcc_assert (i == ploop->order[i]);
3299 i = ploop->order[i];
3300
3301 if (dim == loop->dimen - 1 && loop->parent == NULL)
3302 {
3303 stride = gfc_conv_array_stride (info->descriptor,
3304 innermost_ss (ss)->dim[i]);
3305
3306 /* Calculate the stride of the innermost loop. Hopefully this will
3307 allow the backend optimizers to do their stuff more effectively.
3308 */
3309 info->stride0 = gfc_evaluate_now (stride, pblock);
3310
3311 /* For the outermost loop calculate the offset due to any
3312 elemental dimensions. It will have been initialized with the
3313 base offset of the array. */
3314 if (info->ref)
3315 {
3316 for (i = 0; i < ar->dimen; i++)
3317 {
3318 if (ar->dimen_type[i] != DIMEN_ELEMENT)
3319 continue;
3320
3321 add_array_offset (pblock, loop, ss, ar, i, /* unused */ -1);
3322 }
3323 }
3324 }
3325 else
3326 /* Add the offset for the previous loop dimension. */
3327 add_array_offset (pblock, ploop, ss, ar, pss->dim[i], i);
3328
3329 /* Remember this offset for the second loop. */
3330 if (dim == loop->temp_dim - 1 && loop->parent == NULL)
3331 info->saved_offset = info->offset;
3332 }
3333 }
3334
3335
3336 /* Start a scalarized expression. Creates a scope and declares loop
3337 variables. */
3338
3339 void
3340 gfc_start_scalarized_body (gfc_loopinfo * loop, stmtblock_t * pbody)
3341 {
3342 int dim;
3343 int n;
3344 int flags;
3345
3346 gcc_assert (!loop->array_parameter);
3347
3348 for (dim = loop->dimen - 1; dim >= 0; dim--)
3349 {
3350 n = loop->order[dim];
3351
3352 gfc_start_block (&loop->code[n]);
3353
3354 /* Create the loop variable. */
3355 loop->loopvar[n] = gfc_create_var (gfc_array_index_type, "S");
3356
3357 if (dim < loop->temp_dim)
3358 flags = 3;
3359 else
3360 flags = 1;
3361 /* Calculate values that will be constant within this loop. */
3362 gfc_trans_preloop_setup (loop, dim, flags, &loop->code[n]);
3363 }
3364 gfc_start_block (pbody);
3365 }
3366
3367
3368 /* Generates the actual loop code for a scalarization loop. */
3369
3370 void
3371 gfc_trans_scalarized_loop_end (gfc_loopinfo * loop, int n,
3372 stmtblock_t * pbody)
3373 {
3374 stmtblock_t block;
3375 tree cond;
3376 tree tmp;
3377 tree loopbody;
3378 tree exit_label;
3379 tree stmt;
3380 tree init;
3381 tree incr;
3382
3383 if ((ompws_flags & (OMPWS_WORKSHARE_FLAG | OMPWS_SCALARIZER_WS))
3384 == (OMPWS_WORKSHARE_FLAG | OMPWS_SCALARIZER_WS)
3385 && n == loop->dimen - 1)
3386 {
3387 /* We create an OMP_FOR construct for the outermost scalarized loop. */
3388 init = make_tree_vec (1);
3389 cond = make_tree_vec (1);
3390 incr = make_tree_vec (1);
3391
3392 /* Cycle statement is implemented with a goto. Exit statement must not
3393 be present for this loop. */
3394 exit_label = gfc_build_label_decl (NULL_TREE);
3395 TREE_USED (exit_label) = 1;
3396
3397 /* Label for cycle statements (if needed). */
3398 tmp = build1_v (LABEL_EXPR, exit_label);
3399 gfc_add_expr_to_block (pbody, tmp);
3400
3401 stmt = make_node (OMP_FOR);
3402
3403 TREE_TYPE (stmt) = void_type_node;
3404 OMP_FOR_BODY (stmt) = loopbody = gfc_finish_block (pbody);
3405
3406 OMP_FOR_CLAUSES (stmt) = build_omp_clause (input_location,
3407 OMP_CLAUSE_SCHEDULE);
3408 OMP_CLAUSE_SCHEDULE_KIND (OMP_FOR_CLAUSES (stmt))
3409 = OMP_CLAUSE_SCHEDULE_STATIC;
3410 if (ompws_flags & OMPWS_NOWAIT)
3411 OMP_CLAUSE_CHAIN (OMP_FOR_CLAUSES (stmt))
3412 = build_omp_clause (input_location, OMP_CLAUSE_NOWAIT);
3413
3414 /* Initialize the loopvar. */
3415 TREE_VEC_ELT (init, 0) = build2_v (MODIFY_EXPR, loop->loopvar[n],
3416 loop->from[n]);
3417 OMP_FOR_INIT (stmt) = init;
3418 /* The exit condition. */
3419 TREE_VEC_ELT (cond, 0) = build2_loc (input_location, LE_EXPR,
3420 boolean_type_node,
3421 loop->loopvar[n], loop->to[n]);
3422 SET_EXPR_LOCATION (TREE_VEC_ELT (cond, 0), input_location);
3423 OMP_FOR_COND (stmt) = cond;
3424 /* Increment the loopvar. */
3425 tmp = build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
3426 loop->loopvar[n], gfc_index_one_node);
3427 TREE_VEC_ELT (incr, 0) = fold_build2_loc (input_location, MODIFY_EXPR,
3428 void_type_node, loop->loopvar[n], tmp);
3429 OMP_FOR_INCR (stmt) = incr;
3430
3431 ompws_flags &= ~OMPWS_CURR_SINGLEUNIT;
3432 gfc_add_expr_to_block (&loop->code[n], stmt);
3433 }
3434 else
3435 {
3436 bool reverse_loop = (loop->reverse[n] == GFC_REVERSE_SET)
3437 && (loop->temp_ss == NULL);
3438
3439 loopbody = gfc_finish_block (pbody);
3440
3441 if (reverse_loop)
3442 {
3443 tmp = loop->from[n];
3444 loop->from[n] = loop->to[n];
3445 loop->to[n] = tmp;
3446 }
3447
3448 /* Initialize the loopvar. */
3449 if (loop->loopvar[n] != loop->from[n])
3450 gfc_add_modify (&loop->code[n], loop->loopvar[n], loop->from[n]);
3451
3452 exit_label = gfc_build_label_decl (NULL_TREE);
3453
3454 /* Generate the loop body. */
3455 gfc_init_block (&block);
3456
3457 /* The exit condition. */
3458 cond = fold_build2_loc (input_location, reverse_loop ? LT_EXPR : GT_EXPR,
3459 boolean_type_node, loop->loopvar[n], loop->to[n]);
3460 tmp = build1_v (GOTO_EXPR, exit_label);
3461 TREE_USED (exit_label) = 1;
3462 tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location));
3463 gfc_add_expr_to_block (&block, tmp);
3464
3465 /* The main body. */
3466 gfc_add_expr_to_block (&block, loopbody);
3467
3468 /* Increment the loopvar. */
3469 tmp = fold_build2_loc (input_location,
3470 reverse_loop ? MINUS_EXPR : PLUS_EXPR,
3471 gfc_array_index_type, loop->loopvar[n],
3472 gfc_index_one_node);
3473
3474 gfc_add_modify (&block, loop->loopvar[n], tmp);
3475
3476 /* Build the loop. */
3477 tmp = gfc_finish_block (&block);
3478 tmp = build1_v (LOOP_EXPR, tmp);
3479 gfc_add_expr_to_block (&loop->code[n], tmp);
3480
3481 /* Add the exit label. */
3482 tmp = build1_v (LABEL_EXPR, exit_label);
3483 gfc_add_expr_to_block (&loop->code[n], tmp);
3484 }
3485
3486 }
3487
3488
3489 /* Finishes and generates the loops for a scalarized expression. */
3490
3491 void
3492 gfc_trans_scalarizing_loops (gfc_loopinfo * loop, stmtblock_t * body)
3493 {
3494 int dim;
3495 int n;
3496 gfc_ss *ss;
3497 stmtblock_t *pblock;
3498 tree tmp;
3499
3500 pblock = body;
3501 /* Generate the loops. */
3502 for (dim = 0; dim < loop->dimen; dim++)
3503 {
3504 n = loop->order[dim];
3505 gfc_trans_scalarized_loop_end (loop, n, pblock);
3506 loop->loopvar[n] = NULL_TREE;
3507 pblock = &loop->code[n];
3508 }
3509
3510 tmp = gfc_finish_block (pblock);
3511 gfc_add_expr_to_block (&loop->pre, tmp);
3512
3513 /* Clear all the used flags. */
3514 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3515 if (ss->parent == NULL)
3516 ss->info->useflags = 0;
3517 }
3518
3519
3520 /* Finish the main body of a scalarized expression, and start the secondary
3521 copying body. */
3522
3523 void
3524 gfc_trans_scalarized_loop_boundary (gfc_loopinfo * loop, stmtblock_t * body)
3525 {
3526 int dim;
3527 int n;
3528 stmtblock_t *pblock;
3529 gfc_ss *ss;
3530
3531 pblock = body;
3532 /* We finish as many loops as are used by the temporary. */
3533 for (dim = 0; dim < loop->temp_dim - 1; dim++)
3534 {
3535 n = loop->order[dim];
3536 gfc_trans_scalarized_loop_end (loop, n, pblock);
3537 loop->loopvar[n] = NULL_TREE;
3538 pblock = &loop->code[n];
3539 }
3540
3541 /* We don't want to finish the outermost loop entirely. */
3542 n = loop->order[loop->temp_dim - 1];
3543 gfc_trans_scalarized_loop_end (loop, n, pblock);
3544
3545 /* Restore the initial offsets. */
3546 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3547 {
3548 gfc_ss_type ss_type;
3549 gfc_ss_info *ss_info;
3550
3551 ss_info = ss->info;
3552
3553 if ((ss_info->useflags & 2) == 0)
3554 continue;
3555
3556 ss_type = ss_info->type;
3557 if (ss_type != GFC_SS_SECTION
3558 && ss_type != GFC_SS_FUNCTION
3559 && ss_type != GFC_SS_CONSTRUCTOR
3560 && ss_type != GFC_SS_COMPONENT)
3561 continue;
3562
3563 ss_info->data.array.offset = ss_info->data.array.saved_offset;
3564 }
3565
3566 /* Restart all the inner loops we just finished. */
3567 for (dim = loop->temp_dim - 2; dim >= 0; dim--)
3568 {
3569 n = loop->order[dim];
3570
3571 gfc_start_block (&loop->code[n]);
3572
3573 loop->loopvar[n] = gfc_create_var (gfc_array_index_type, "Q");
3574
3575 gfc_trans_preloop_setup (loop, dim, 2, &loop->code[n]);
3576 }
3577
3578 /* Start a block for the secondary copying code. */
3579 gfc_start_block (body);
3580 }
3581
3582
3583 /* Precalculate (either lower or upper) bound of an array section.
3584 BLOCK: Block in which the (pre)calculation code will go.
3585 BOUNDS[DIM]: Where the bound value will be stored once evaluated.
3586 VALUES[DIM]: Specified bound (NULL <=> unspecified).
3587 DESC: Array descriptor from which the bound will be picked if unspecified
3588 (either lower or upper bound according to LBOUND). */
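/* For example, for the section a(2:, :) the lower bound of the first
   dimension is the explicit constant 2, while the first dimension's upper
   bound and both bounds of the second dimension are taken from the array
   descriptor.  */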
3589
3590 static void
3591 evaluate_bound (stmtblock_t *block, tree *bounds, gfc_expr ** values,
3592 tree desc, int dim, bool lbound)
3593 {
3594 gfc_se se;
3595 gfc_expr * input_val = values[dim];
3596 tree *output = &bounds[dim];
3597
3598
3599 if (input_val)
3600 {
3601 /* Specified section bound. */
3602 gfc_init_se (&se, NULL);
3603 gfc_conv_expr_type (&se, input_val, gfc_array_index_type);
3604 gfc_add_block_to_block (block, &se.pre);
3605 *output = se.expr;
3606 }
3607 else
3608 {
3609 /* No specific bound specified so use the bound of the array. */
3610 *output = lbound ? gfc_conv_array_lbound (desc, dim) :
3611 gfc_conv_array_ubound (desc, dim);
3612 }
3613 *output = gfc_evaluate_now (*output, block);
3614 }
3615
3616
3617 /* Calculate the lower bound of an array section. */
3618
3619 static void
3620 gfc_conv_section_startstride (gfc_loopinfo * loop, gfc_ss * ss, int dim)
3621 {
3622 gfc_expr *stride = NULL;
3623 tree desc;
3624 gfc_se se;
3625 gfc_array_info *info;
3626 gfc_array_ref *ar;
3627
3628 gcc_assert (ss->info->type == GFC_SS_SECTION);
3629
3630 info = &ss->info->data.array;
3631 ar = &info->ref->u.ar;
3632
3633 if (ar->dimen_type[dim] == DIMEN_VECTOR)
3634 {
3635 /* We use a zero-based index to access the vector. */
3636 info->start[dim] = gfc_index_zero_node;
3637 info->end[dim] = NULL;
3638 info->stride[dim] = gfc_index_one_node;
3639 return;
3640 }
3641
3642 gcc_assert (ar->dimen_type[dim] == DIMEN_RANGE
3643 || ar->dimen_type[dim] == DIMEN_THIS_IMAGE);
3644 desc = info->descriptor;
3645 stride = ar->stride[dim];
3646
3647 /* Calculate the start of the range. For vector subscripts this will
3648 be the range of the vector. */
3649 evaluate_bound (&loop->pre, info->start, ar->start, desc, dim, true);
3650
3651 /* Similarly calculate the end. Although this is not used in the
3652 scalarizer, it is needed when checking bounds and where the end
3653 is an expression with side-effects. */
3654 evaluate_bound (&loop->pre, info->end, ar->end, desc, dim, false);
3655
3656 /* Calculate the stride. */
3657 if (stride == NULL)
3658 info->stride[dim] = gfc_index_one_node;
3659 else
3660 {
3661 gfc_init_se (&se, NULL);
3662 gfc_conv_expr_type (&se, stride, gfc_array_index_type);
3663 gfc_add_block_to_block (&loop->pre, &se.pre);
3664 info->stride[dim] = gfc_evaluate_now (se.expr, &loop->pre);
3665 }
3666 }
3667
3668
3669 /* Calculates the range start and stride for a SS chain. Also gets the
3670 descriptor and data pointer. The range of vector subscripts is the size
3671 of the vector. Array bounds are also checked. */
3672
3673 void
3674 gfc_conv_ss_startstride (gfc_loopinfo * loop)
3675 {
3676 int n;
3677 tree tmp;
3678 gfc_ss *ss;
3679 tree desc;
3680
3681 loop->dimen = 0;
3682 /* Determine the rank of the loop. */
3683 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3684 {
3685 switch (ss->info->type)
3686 {
3687 case GFC_SS_SECTION:
3688 case GFC_SS_CONSTRUCTOR:
3689 case GFC_SS_FUNCTION:
3690 case GFC_SS_COMPONENT:
3691 loop->dimen = ss->dimen;
3692 goto done;
3693
3694 /* As usual, lbound and ubound are exceptions! */
3695 case GFC_SS_INTRINSIC:
3696 switch (ss->info->expr->value.function.isym->id)
3697 {
3698 case GFC_ISYM_LBOUND:
3699 case GFC_ISYM_UBOUND:
3700 case GFC_ISYM_LCOBOUND:
3701 case GFC_ISYM_UCOBOUND:
3702 case GFC_ISYM_THIS_IMAGE:
3703 loop->dimen = ss->dimen;
3704 goto done;
3705
3706 default:
3707 break;
3708 }
3709
3710 default:
3711 break;
3712 }
3713 }
3714
3715 /* We should have determined the rank of the expression by now. If
3716 not, that's bad news. */
3717 gcc_unreachable ();
3718
3719 done:
3720 /* Loop over all the SS in the chain. */
3721 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3722 {
3723 gfc_ss_info *ss_info;
3724 gfc_array_info *info;
3725 gfc_expr *expr;
3726
3727 ss_info = ss->info;
3728 expr = ss_info->expr;
3729 info = &ss_info->data.array;
3730
3731 if (expr && expr->shape && !info->shape)
3732 info->shape = expr->shape;
3733
3734 switch (ss_info->type)
3735 {
3736 case GFC_SS_SECTION:
3737 /* Get the descriptor for the array. If it is a cross loops array,
3738 we got the descriptor already in the outermost loop. */
3739 if (ss->parent == NULL)
3740 gfc_conv_ss_descriptor (&loop->pre, ss, !loop->array_parameter);
3741
3742 for (n = 0; n < ss->dimen; n++)
3743 gfc_conv_section_startstride (loop, ss, ss->dim[n]);
3744 break;
3745
3746 case GFC_SS_INTRINSIC:
3747 switch (expr->value.function.isym->id)
3748 {
3749 /* Fall through to supply start and stride. */
3750 case GFC_ISYM_LBOUND:
3751 case GFC_ISYM_UBOUND:
3752 case GFC_ISYM_LCOBOUND:
3753 case GFC_ISYM_UCOBOUND:
3754 case GFC_ISYM_THIS_IMAGE:
3755 break;
3756
3757 default:
3758 continue;
3759 }
3760
3761 case GFC_SS_CONSTRUCTOR:
3762 case GFC_SS_FUNCTION:
3763 for (n = 0; n < ss->dimen; n++)
3764 {
3765 int dim = ss->dim[n];
3766
3767 info->start[dim] = gfc_index_zero_node;
3768 info->end[dim] = gfc_index_zero_node;
3769 info->stride[dim] = gfc_index_one_node;
3770 }
3771 break;
3772
3773 default:
3774 break;
3775 }
3776 }
3777
3778 /* The rest is just runtime bound checking. */
3779 if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
3780 {
3781 stmtblock_t block;
3782 tree lbound, ubound;
3783 tree end;
3784 tree size[GFC_MAX_DIMENSIONS];
3785 tree stride_pos, stride_neg, non_zerosized, tmp2, tmp3;
3786 gfc_array_info *info;
3787 char *msg;
3788 int dim;
3789
3790 gfc_start_block (&block);
3791
3792 for (n = 0; n < loop->dimen; n++)
3793 size[n] = NULL_TREE;
3794
3795 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3796 {
3797 stmtblock_t inner;
3798 gfc_ss_info *ss_info;
3799 gfc_expr *expr;
3800 locus *expr_loc;
3801 const char *expr_name;
3802
3803 ss_info = ss->info;
3804 if (ss_info->type != GFC_SS_SECTION)
3805 continue;
3806
3807 /* Catch allocatable lhs in f2003. */
3808 if (gfc_option.flag_realloc_lhs && ss->is_alloc_lhs)
3809 continue;
3810
3811 expr = ss_info->expr;
3812 expr_loc = &expr->where;
3813 expr_name = expr->symtree->name;
3814
3815 gfc_start_block (&inner);
3816
3817 /* TODO: range checking for mapped dimensions. */
3818 info = &ss_info->data.array;
3819
3820 /* This code only checks ranges. Elemental and vector
3821 dimensions are checked later. */
3822 for (n = 0; n < loop->dimen; n++)
3823 {
3824 bool check_upper;
3825
3826 dim = ss->dim[n];
3827 if (info->ref->u.ar.dimen_type[dim] != DIMEN_RANGE)
3828 continue;
3829
3830 if (dim == info->ref->u.ar.dimen - 1
3831 && info->ref->u.ar.as->type == AS_ASSUMED_SIZE)
3832 check_upper = false;
3833 else
3834 check_upper = true;
3835
3836 /* Zero stride is not allowed. */
3837 tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
3838 info->stride[dim], gfc_index_zero_node);
3839 asprintf (&msg, "Zero stride is not allowed, for dimension %d "
3840 "of array '%s'", dim + 1, expr_name);
3841 gfc_trans_runtime_check (true, false, tmp, &inner,
3842 expr_loc, msg);
3843 free (msg);
3844
3845 desc = info->descriptor;
3846
3847 /* This is the run-time equivalent of resolve.c's
3848 check_dimension(). The logic is more readable there
3849 than it is here, with all the trees. */
3850 lbound = gfc_conv_array_lbound (desc, dim);
3851 end = info->end[dim];
3852 if (check_upper)
3853 ubound = gfc_conv_array_ubound (desc, dim);
3854 else
3855 ubound = NULL;
3856
3857 /* non_zerosized is true when the selected range is not
3858 empty. */
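/* That is, (stride > 0 && start <= end) || (stride < 0 && start >= end). */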
3859 stride_pos = fold_build2_loc (input_location, GT_EXPR,
3860 boolean_type_node, info->stride[dim],
3861 gfc_index_zero_node);
3862 tmp = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
3863 info->start[dim], end);
3864 stride_pos = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3865 boolean_type_node, stride_pos, tmp);
3866
3867 stride_neg = fold_build2_loc (input_location, LT_EXPR,
3868 boolean_type_node,
3869 info->stride[dim], gfc_index_zero_node);
3870 tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
3871 info->start[dim], end);
3872 stride_neg = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3873 boolean_type_node,
3874 stride_neg, tmp);
3875 non_zerosized = fold_build2_loc (input_location, TRUTH_OR_EXPR,
3876 boolean_type_node,
3877 stride_pos, stride_neg);
3878
3879 /* Check the start of the range against the lower and upper
3880 bounds of the array, if the range is not empty.
3881 If upper bound is present, include both bounds in the
3882 error message. */
3883 if (check_upper)
3884 {
3885 tmp = fold_build2_loc (input_location, LT_EXPR,
3886 boolean_type_node,
3887 info->start[dim], lbound);
3888 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3889 boolean_type_node,
3890 non_zerosized, tmp);
3891 tmp2 = fold_build2_loc (input_location, GT_EXPR,
3892 boolean_type_node,
3893 info->start[dim], ubound);
3894 tmp2 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3895 boolean_type_node,
3896 non_zerosized, tmp2);
3897 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3898 "outside of expected range (%%ld:%%ld)",
3899 dim + 1, expr_name);
3900 gfc_trans_runtime_check (true, false, tmp, &inner,
3901 expr_loc, msg,
3902 fold_convert (long_integer_type_node, info->start[dim]),
3903 fold_convert (long_integer_type_node, lbound),
3904 fold_convert (long_integer_type_node, ubound));
3905 gfc_trans_runtime_check (true, false, tmp2, &inner,
3906 expr_loc, msg,
3907 fold_convert (long_integer_type_node, info->start[dim]),
3908 fold_convert (long_integer_type_node, lbound),
3909 fold_convert (long_integer_type_node, ubound));
3910 free (msg);
3911 }
3912 else
3913 {
3914 tmp = fold_build2_loc (input_location, LT_EXPR,
3915 boolean_type_node,
3916 info->start[dim], lbound);
3917 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3918 boolean_type_node, non_zerosized, tmp);
3919 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3920 "below lower bound of %%ld",
3921 dim + 1, expr_name);
3922 gfc_trans_runtime_check (true, false, tmp, &inner,
3923 expr_loc, msg,
3924 fold_convert (long_integer_type_node, info->start[dim]),
3925 fold_convert (long_integer_type_node, lbound));
3926 free (msg);
3927 }
3928
3929 /* Compute the last element of the range, which is not
3930 necessarily "end" (think 0:5:3, which doesn't contain 5)
3931 and check it against both lower and upper bounds. */
3932
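/* For example, for the section 0:5:3 the last referenced element is
5 - (5 - 0) % 3 == 3, computed below with TRUNC_MOD_EXPR. */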
3933 tmp = fold_build2_loc (input_location, MINUS_EXPR,
3934 gfc_array_index_type, end,
3935 info->start[dim]);
3936 tmp = fold_build2_loc (input_location, TRUNC_MOD_EXPR,
3937 gfc_array_index_type, tmp,
3938 info->stride[dim]);
3939 tmp = fold_build2_loc (input_location, MINUS_EXPR,
3940 gfc_array_index_type, end, tmp);
3941 tmp2 = fold_build2_loc (input_location, LT_EXPR,
3942 boolean_type_node, tmp, lbound);
3943 tmp2 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3944 boolean_type_node, non_zerosized, tmp2);
3945 if (check_upper)
3946 {
3947 tmp3 = fold_build2_loc (input_location, GT_EXPR,
3948 boolean_type_node, tmp, ubound);
3949 tmp3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3950 boolean_type_node, non_zerosized, tmp3);
3951 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3952 "outside of expected range (%%ld:%%ld)",
3953 dim + 1, expr_name);
3954 gfc_trans_runtime_check (true, false, tmp2, &inner,
3955 expr_loc, msg,
3956 fold_convert (long_integer_type_node, tmp),
3957 fold_convert (long_integer_type_node, ubound),
3958 fold_convert (long_integer_type_node, lbound));
3959 gfc_trans_runtime_check (true, false, tmp3, &inner,
3960 expr_loc, msg,
3961 fold_convert (long_integer_type_node, tmp),
3962 fold_convert (long_integer_type_node, ubound),
3963 fold_convert (long_integer_type_node, lbound));
3964 free (msg);
3965 }
3966 else
3967 {
3968 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3969 "below lower bound of %%ld",
3970 dim + 1, expr_name);
3971 gfc_trans_runtime_check (true, false, tmp2, &inner,
3972 expr_loc, msg,
3973 fold_convert (long_integer_type_node, tmp),
3974 fold_convert (long_integer_type_node, lbound));
3975 free (msg);
3976 }
3977
3978 /* Check the section sizes match. */
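/* The section has (end - start) / stride + 1 elements (floor division),
clamped at zero; e.g. 1:10:3 yields (10 - 1) / 3 + 1 == 4. */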
3979 tmp = fold_build2_loc (input_location, MINUS_EXPR,
3980 gfc_array_index_type, end,
3981 info->start[dim]);
3982 tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR,
3983 gfc_array_index_type, tmp,
3984 info->stride[dim]);
3985 tmp = fold_build2_loc (input_location, PLUS_EXPR,
3986 gfc_array_index_type,
3987 gfc_index_one_node, tmp);
3988 tmp = fold_build2_loc (input_location, MAX_EXPR,
3989 gfc_array_index_type, tmp,
3990 build_int_cst (gfc_array_index_type, 0));
3991 /* We remember the size of the first section, and check all the
3992 others against this. */
3993 if (size[n])
3994 {
3995 tmp3 = fold_build2_loc (input_location, NE_EXPR,
3996 boolean_type_node, tmp, size[n]);
3997 asprintf (&msg, "Array bound mismatch for dimension %d "
3998 "of array '%s' (%%ld/%%ld)",
3999 dim + 1, expr_name);
4000
4001 gfc_trans_runtime_check (true, false, tmp3, &inner,
4002 expr_loc, msg,
4003 fold_convert (long_integer_type_node, tmp),
4004 fold_convert (long_integer_type_node, size[n]));
4005
4006 free (msg);
4007 }
4008 else
4009 size[n] = gfc_evaluate_now (tmp, &inner);
4010 }
4011
4012 tmp = gfc_finish_block (&inner);
4013
4014 /* For optional arguments, only check bounds if the argument is
4015 present. */
4016 if (expr->symtree->n.sym->attr.optional
4017 || expr->symtree->n.sym->attr.not_always_present)
4018 tmp = build3_v (COND_EXPR,
4019 gfc_conv_expr_present (expr->symtree->n.sym),
4020 tmp, build_empty_stmt (input_location));
4021
4022 gfc_add_expr_to_block (&block, tmp);
4023
4024 }
4025
4026 tmp = gfc_finish_block (&block);
4027 gfc_add_expr_to_block (&loop->pre, tmp);
4028 }
4029
4030 for (loop = loop->nested; loop; loop = loop->next)
4031 gfc_conv_ss_startstride (loop);
4032 }
4033
4034 /* Return true if both symbols could refer to the same data object. Does
4035 not take account of aliasing due to equivalence statements. */
4036
4037 static int
4038 symbols_could_alias (gfc_symbol *lsym, gfc_symbol *rsym, bool lsym_pointer,
4039 bool lsym_target, bool rsym_pointer, bool rsym_target)
4040 {
4041 /* Aliasing isn't possible if the symbols have different base types. */
4042 if (gfc_compare_types (&lsym->ts, &rsym->ts) == 0)
4043 return 0;
4044
4045 /* Pointers can point to other pointers and target objects. */
4046
4047 if ((lsym_pointer && (rsym_pointer || rsym_target))
4048 || (rsym_pointer && (lsym_pointer || lsym_target)))
4049 return 1;
4050
4051 /* Special case: Argument association, cf. F90 12.4.1.6, F2003 12.4.1.7
4052 and F2008 12.5.2.13 items 3b and 4b. The pointer case (a) is already
4053 checked above. */
4054 if (lsym_target && rsym_target
4055 && ((lsym->attr.dummy && !lsym->attr.contiguous
4056 && (!lsym->attr.dimension || lsym->as->type == AS_ASSUMED_SHAPE))
4057 || (rsym->attr.dummy && !rsym->attr.contiguous
4058 && (!rsym->attr.dimension
4059 || rsym->as->type == AS_ASSUMED_SHAPE))))
4060 return 1;
4061
4062 return 0;
4063 }
4064
4065
4066 /* Return true if the two SS could be aliased, i.e. both point to the same data
4067 object. */
4068 /* TODO: resolve aliases based on frontend expressions. */
4069
4070 static int
4071 gfc_could_be_alias (gfc_ss * lss, gfc_ss * rss)
4072 {
4073 gfc_ref *lref;
4074 gfc_ref *rref;
4075 gfc_expr *lexpr, *rexpr;
4076 gfc_symbol *lsym;
4077 gfc_symbol *rsym;
4078 bool lsym_pointer, lsym_target, rsym_pointer, rsym_target;
4079
4080 lexpr = lss->info->expr;
4081 rexpr = rss->info->expr;
4082
4083 lsym = lexpr->symtree->n.sym;
4084 rsym = rexpr->symtree->n.sym;
4085
4086 lsym_pointer = lsym->attr.pointer;
4087 lsym_target = lsym->attr.target;
4088 rsym_pointer = rsym->attr.pointer;
4089 rsym_target = rsym->attr.target;
4090
4091 if (symbols_could_alias (lsym, rsym, lsym_pointer, lsym_target,
4092 rsym_pointer, rsym_target))
4093 return 1;
4094
4095 if (rsym->ts.type != BT_DERIVED && rsym->ts.type != BT_CLASS
4096 && lsym->ts.type != BT_DERIVED && lsym->ts.type != BT_CLASS)
4097 return 0;
4098
4099 /* For derived types we must check all the component types. We can ignore
4100 array references as these will have the same base type as the previous
4101 component ref. */
4102 for (lref = lexpr->ref; lref != lss->info->data.array.ref; lref = lref->next)
4103 {
4104 if (lref->type != REF_COMPONENT)
4105 continue;
4106
4107 lsym_pointer = lsym_pointer || lref->u.c.sym->attr.pointer;
4108 lsym_target = lsym_target || lref->u.c.sym->attr.target;
4109
4110 if (symbols_could_alias (lref->u.c.sym, rsym, lsym_pointer, lsym_target,
4111 rsym_pointer, rsym_target))
4112 return 1;
4113
4114 if ((lsym_pointer && (rsym_pointer || rsym_target))
4115 || (rsym_pointer && (lsym_pointer || lsym_target)))
4116 {
4117 if (gfc_compare_types (&lref->u.c.component->ts,
4118 &rsym->ts))
4119 return 1;
4120 }
4121
4122 for (rref = rexpr->ref; rref != rss->info->data.array.ref;
4123 rref = rref->next)
4124 {
4125 if (rref->type != REF_COMPONENT)
4126 continue;
4127
4128 rsym_pointer = rsym_pointer || rref->u.c.sym->attr.pointer;
4129 rsym_target = rsym_target || rref->u.c.sym->attr.target;
4130
4131 if (symbols_could_alias (lref->u.c.sym, rref->u.c.sym,
4132 lsym_pointer, lsym_target,
4133 rsym_pointer, rsym_target))
4134 return 1;
4135
4136 if ((lsym_pointer && (rsym_pointer || rsym_target))
4137 || (rsym_pointer && (lsym_pointer || lsym_target)))
4138 {
4139 if (gfc_compare_types (&lref->u.c.component->ts,
4140 &rref->u.c.sym->ts))
4141 return 1;
4142 if (gfc_compare_types (&lref->u.c.sym->ts,
4143 &rref->u.c.component->ts))
4144 return 1;
4145 if (gfc_compare_types (&lref->u.c.component->ts,
4146 &rref->u.c.component->ts))
4147 return 1;
4148 }
4149 }
4150 }
4151
4152 lsym_pointer = lsym->attr.pointer;
4153 lsym_target = lsym->attr.target;
4154 lsym_pointer = lsym->attr.pointer;
4155 lsym_target = lsym->attr.target;
4156
4157 for (rref = rexpr->ref; rref != rss->info->data.array.ref; rref = rref->next)
4158 {
4159 if (rref->type != REF_COMPONENT)
4160 break;
4161
4162 rsym_pointer = rsym_pointer || rref->u.c.sym->attr.pointer;
4163 rsym_target = rsym_target || rref->u.c.sym->attr.target;
4164
4165 if (symbols_could_alias (rref->u.c.sym, lsym,
4166 lsym_pointer, lsym_target,
4167 rsym_pointer, rsym_target))
4168 return 1;
4169
4170 if ((lsym_pointer && (rsym_pointer || rsym_target))
4171 || (rsym_pointer && (lsym_pointer || lsym_target)))
4172 {
4173 if (gfc_compare_types (&lsym->ts, &rref->u.c.component->ts))
4174 return 1;
4175 }
4176 }
4177
4178 return 0;
4179 }
4180
4181
4182 /* Resolve array data dependencies. Creates a temporary if required. */
4183 /* TODO: Calc dependencies with gfc_expr rather than gfc_ss, and move to
4184 dependency.c. */
4185
4186 void
4187 gfc_conv_resolve_dependencies (gfc_loopinfo * loop, gfc_ss * dest,
4188 gfc_ss * rss)
4189 {
4190 gfc_ss *ss;
4191 gfc_ref *lref;
4192 gfc_ref *rref;
4193 gfc_expr *dest_expr;
4194 gfc_expr *ss_expr;
4195 int nDepend = 0;
4196 int i, j;
4197
4198 loop->temp_ss = NULL;
4199 dest_expr = dest->info->expr;
4200
4201 for (ss = rss; ss != gfc_ss_terminator; ss = ss->next)
4202 {
4203 if (ss->info->type != GFC_SS_SECTION)
4204 continue;
4205
4206 ss_expr = ss->info->expr;
4207
4208 if (dest_expr->symtree->n.sym != ss_expr->symtree->n.sym)
4209 {
4210 if (gfc_could_be_alias (dest, ss)
4211 || gfc_are_equivalenced_arrays (dest_expr, ss_expr))
4212 {
4213 nDepend = 1;
4214 break;
4215 }
4216 }
4217 else
4218 {
4219 lref = dest_expr->ref;
4220 rref = ss_expr->ref;
4221
4222 nDepend = gfc_dep_resolver (lref, rref, &loop->reverse[0]);
4223
4224 if (nDepend == 1)
4225 break;
4226
4227 for (i = 0; i < dest->dimen; i++)
4228 for (j = 0; j < ss->dimen; j++)
4229 if (i != j
4230 && dest->dim[i] == ss->dim[j])
4231 {
4232 /* If we don't access array elements in the same order,
4233 there is a dependency. */
4234 nDepend = 1;
4235 goto temporary;
4236 }
4237 #if 0
4238 /* TODO : loop shifting. */
4239 if (nDepend == 1)
4240 {
4241 /* Mark the dimensions for LOOP SHIFTING */
4242 for (n = 0; n < loop->dimen; n++)
4243 {
4244 int dim = dest->data.info.dim[n];
4245
4246 if (lref->u.ar.dimen_type[dim] == DIMEN_VECTOR)
4247 depends[n] = 2;
4248 else if (! gfc_is_same_range (&lref->u.ar,
4249 &rref->u.ar, dim, 0))
4250 depends[n] = 1;
4251 }
4252
4253 /* Put all the dimensions with dependencies in the
4254 innermost loops. */
4255 dim = 0;
4256 for (n = 0; n < loop->dimen; n++)
4257 {
4258 gcc_assert (loop->order[n] == n);
4259 if (depends[n])
4260 loop->order[dim++] = n;
4261 }
4262 for (n = 0; n < loop->dimen; n++)
4263 {
4264 if (! depends[n])
4265 loop->order[dim++] = n;
4266 }
4267
4268 gcc_assert (dim == loop->dimen);
4269 break;
4270 }
4271 #endif
4272 }
4273 }
4274
4275 temporary:
4276
4277 if (nDepend == 1)
4278 {
4279 tree base_type = gfc_typenode_for_spec (&dest_expr->ts);
4280 if (GFC_ARRAY_TYPE_P (base_type)
4281 || GFC_DESCRIPTOR_TYPE_P (base_type))
4282 base_type = gfc_get_element_type (base_type);
4283 loop->temp_ss = gfc_get_temp_ss (base_type, dest->info->string_length,
4284 loop->dimen);
4285 gfc_add_ss_to_loop (loop, loop->temp_ss);
4286 }
4287 else
4288 loop->temp_ss = NULL;
4289 }
4290
4291
4292 /* Browse through each array's information from the scalarizer and set the loop
4293 bounds according to the "best" one (per dimension), i.e. the one which
4294 provides the most information (constant bounds, shape, etc). */
4295
4296 static void
4297 set_loop_bounds (gfc_loopinfo *loop)
4298 {
4299 int n, dim, spec_dim;
4300 gfc_array_info *info;
4301 gfc_array_info *specinfo;
4302 gfc_ss *ss;
4303 tree tmp;
4304 gfc_ss **loopspec;
4305 bool dynamic[GFC_MAX_DIMENSIONS];
4306 mpz_t *cshape;
4307 mpz_t i;
4308
4309 loopspec = loop->specloop;
4310
4311 mpz_init (i);
4312 for (n = 0; n < loop->dimen; n++)
4313 {
4314 loopspec[n] = NULL;
4315 dynamic[n] = false;
4316 /* We use one SS term, and use that to determine the bounds of the
4317 loop for this dimension. We try to pick the simplest term. */
4318 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
4319 {
4320 gfc_ss_type ss_type;
4321
4322 ss_type = ss->info->type;
4323 if (ss_type == GFC_SS_SCALAR
4324 || ss_type == GFC_SS_TEMP
4325 || ss_type == GFC_SS_REFERENCE)
4326 continue;
4327
4328 info = &ss->info->data.array;
4329 dim = ss->dim[n];
4330
4331 if (loopspec[n] != NULL)
4332 {
4333 specinfo = &loopspec[n]->info->data.array;
4334 spec_dim = loopspec[n]->dim[n];
4335 }
4336 else
4337 {
4338 /* Silence uninitialized warnings. */
4339 specinfo = NULL;
4340 spec_dim = 0;
4341 }
4342
4343 if (info->shape)
4344 {
4345 gcc_assert (info->shape[dim]);
4346 /* The frontend has worked out the size for us. */
4347 if (!loopspec[n]
4348 || !specinfo->shape
4349 || !integer_zerop (specinfo->start[spec_dim]))
4350 /* Prefer zero-based descriptors if possible. */
4351 loopspec[n] = ss;
4352 continue;
4353 }
4354
4355 if (ss_type == GFC_SS_CONSTRUCTOR)
4356 {
4357 gfc_constructor_base base;
4358 /* An unknown size constructor will always be rank one.
4359 Higher rank constructors will either have known shape,
4360 or still be wrapped in a call to reshape. */
4361 gcc_assert (loop->dimen == 1);
4362
4363 /* Always prefer to use the constructor bounds if the size
4364 can be determined at compile time. Prefer not to otherwise,
4365 since the general case involves realloc, and it's better to
4366 avoid that overhead if possible. */
4367 base = ss->info->expr->value.constructor;
4368 dynamic[n] = gfc_get_array_constructor_size (&i, base);
4369 if (!dynamic[n] || !loopspec[n])
4370 loopspec[n] = ss;
4371 continue;
4372 }
4373
4374 /* TODO: Pick the best bound if we have a choice between a
4375 function and something else. */
4376 if (ss_type == GFC_SS_FUNCTION)
4377 {
4378 loopspec[n] = ss;
4379 continue;
4380 }
4381
4382 /* Avoid using an allocatable lhs in an assignment, since
4383 there might be a reallocation coming. */
4384 if (loopspec[n] && ss->is_alloc_lhs)
4385 continue;
4386
4387 if (ss_type != GFC_SS_SECTION)
4388 continue;
4389
4390 if (!loopspec[n])
4391 loopspec[n] = ss;
4392 /* Criteria for choosing a loop specifier (most important first):
4393 doesn't need realloc
4394 stride of one
4395 known stride
4396 known lower bound
4397 known upper bound
4398 */
4399 else if ((loopspec[n]->info->type == GFC_SS_CONSTRUCTOR && dynamic[n])
4400 || n >= loop->dimen)
4401 loopspec[n] = ss;
4402 else if (integer_onep (info->stride[dim])
4403 && !integer_onep (specinfo->stride[spec_dim]))
4404 loopspec[n] = ss;
4405 else if (INTEGER_CST_P (info->stride[dim])
4406 && !INTEGER_CST_P (specinfo->stride[spec_dim]))
4407 loopspec[n] = ss;
4408 else if (INTEGER_CST_P (info->start[dim])
4409 && !INTEGER_CST_P (specinfo->start[spec_dim]))
4410 loopspec[n] = ss;
4411 /* We don't work out the upper bound.
4412 else if (INTEGER_CST_P (info->finish[n])
4413 && ! INTEGER_CST_P (specinfo->finish[n]))
4414 loopspec[n] = ss; */
4415 }
4416
4417 /* We should have found the scalarization loop specifier. If not,
4418 that's bad news. */
4419 gcc_assert (loopspec[n]);
4420
4421 info = &loopspec[n]->info->data.array;
4422 dim = loopspec[n]->dim[n];
4423
4424 /* Set the extents of this range. */
4425 cshape = info->shape;
4426 if (cshape && INTEGER_CST_P (info->start[dim])
4427 && INTEGER_CST_P (info->stride[dim]))
4428 {
4429 loop->from[n] = info->start[dim];
4430 mpz_set (i, cshape[get_array_ref_dim_for_loop_dim (loopspec[n], n)]);
4431 mpz_sub_ui (i, i, 1);
4432 /* To = from + (size - 1) * stride. */
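/* E.g. a compile-time shape of 4 with from == 1 and stride == 2 gives
to = 1 + (4 - 1) * 2 == 7. */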
4433 tmp = gfc_conv_mpz_to_tree (i, gfc_index_integer_kind);
4434 if (!integer_onep (info->stride[dim]))
4435 tmp = fold_build2_loc (input_location, MULT_EXPR,
4436 gfc_array_index_type, tmp,
4437 info->stride[dim]);
4438 loop->to[n] = fold_build2_loc (input_location, PLUS_EXPR,
4439 gfc_array_index_type,
4440 loop->from[n], tmp);
4441 }
4442 else
4443 {
4444 loop->from[n] = info->start[dim];
4445 switch (loopspec[n]->info->type)
4446 {
4447 case GFC_SS_CONSTRUCTOR:
4448 /* The upper bound is calculated when we expand the
4449 constructor. */
4450 gcc_assert (loop->to[n] == NULL_TREE);
4451 break;
4452
4453 case GFC_SS_SECTION:
4454 /* Use the end expression if it exists and is not constant,
4455 so that it is only evaluated once. */
4456 loop->to[n] = info->end[dim];
4457 break;
4458
4459 case GFC_SS_FUNCTION:
4460 /* The loop bound will be set when we generate the call. */
4461 gcc_assert (loop->to[n] == NULL_TREE);
4462 break;
4463
4464 default:
4465 gcc_unreachable ();
4466 }
4467 }
4468
4469 /* Transform everything so we have a simple incrementing variable. */
4470 if (integer_onep (info->stride[dim]))
4471 info->delta[dim] = gfc_index_zero_node;
4472 else
4473 {
4474 /* Set the delta for this section. */
4475 info->delta[dim] = gfc_evaluate_now (loop->from[n], &loop->pre);
4476 /* Number of iterations is (end - start + step) / step.
4477 With start = 0, this simplifies to
4478 last = end / step;
4479 for (i = 0; i<=last; i++){...}; */
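/* E.g. for the section 2:10:3 (from == 2, stride == 3), to becomes
(10 - 2) / 3 == 2 and the loop runs over i = 0, 1, 2. */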
4480 tmp = fold_build2_loc (input_location, MINUS_EXPR,
4481 gfc_array_index_type, loop->to[n],
4482 loop->from[n]);
4483 tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR,
4484 gfc_array_index_type, tmp, info->stride[dim]);
4485 tmp = fold_build2_loc (input_location, MAX_EXPR, gfc_array_index_type,
4486 tmp, build_int_cst (gfc_array_index_type, -1));
4487 loop->to[n] = gfc_evaluate_now (tmp, &loop->pre);
4488 /* Make the loop variable start at 0. */
4489 loop->from[n] = gfc_index_zero_node;
4490 }
4491 }
4492 mpz_clear (i);
4493
4494 for (loop = loop->nested; loop; loop = loop->next)
4495 set_loop_bounds (loop);
4496 }
4497
4498
4499 /* Initialize the scalarization loop. Creates the loop variables. Determines
4500 the range of the loop variables. Creates a temporary if required.
4501 Also generates code for scalar expressions which have been
4502 moved outside the loop. */
4503
4504 void
4505 gfc_conv_loop_setup (gfc_loopinfo * loop, locus * where)
4506 {
4507 gfc_ss *tmp_ss;
4508 tree tmp;
4509
4510 set_loop_bounds (loop);
4511
4512 /* Add all the scalar code that can be taken out of the loops.
4513 This may include calculating the loop bounds, so do it before
4514 allocating the temporary. */
4515 gfc_add_loop_ss_code (loop, loop->ss, false, where);
4516
4517 tmp_ss = loop->temp_ss;
4518 /* If we want a temporary then create it. */
4519 if (tmp_ss != NULL)
4520 {
4521 gfc_ss_info *tmp_ss_info;
4522
4523 tmp_ss_info = tmp_ss->info;
4524 gcc_assert (tmp_ss_info->type == GFC_SS_TEMP);
4525 gcc_assert (loop->parent == NULL);
4526
4527 /* Make absolutely sure that this is a complete type. */
4528 if (tmp_ss_info->string_length)
4529 tmp_ss_info->data.temp.type
4530 = gfc_get_character_type_len_for_eltype
4531 (TREE_TYPE (tmp_ss_info->data.temp.type),
4532 tmp_ss_info->string_length);
4533
4534 tmp = tmp_ss_info->data.temp.type;
4535 memset (&tmp_ss_info->data.array, 0, sizeof (gfc_array_info));
4536 tmp_ss_info->type = GFC_SS_SECTION;
4537
4538 gcc_assert (tmp_ss->dimen != 0);
4539
4540 gfc_trans_create_temp_array (&loop->pre, &loop->post, tmp_ss, tmp,
4541 NULL_TREE, false, true, false, where);
4542 }
4543
4544 /* For array parameters we don't have loop variables, so don't calculate the
4545 translations. */
4546 if (!loop->array_parameter)
4547 gfc_set_delta (loop);
4548 }
4549
4550
4551 /* Calculates how to transform from loop variables to array indices for each
4552 array: once loop bounds are chosen, sets the difference (DELTA field) between
4553 loop bounds and array reference bounds, for each array info. */
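/* I.e. delta == start - from * stride, so that the array reference lines
up with the loop variable; e.g. start == 2, stride == 3 and a loop
starting from 0 give delta == 2. */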
4554
4555 void
4556 gfc_set_delta (gfc_loopinfo *loop)
4557 {
4558 gfc_ss *ss, **loopspec;
4559 gfc_array_info *info;
4560 tree tmp;
4561 int n, dim;
4562
4563 loopspec = loop->specloop;
4564
4565 /* Calculate the translation from loop variables to array indices. */
4566 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
4567 {
4568 gfc_ss_type ss_type;
4569
4570 ss_type = ss->info->type;
4571 if (ss_type != GFC_SS_SECTION
4572 && ss_type != GFC_SS_COMPONENT
4573 && ss_type != GFC_SS_CONSTRUCTOR)
4574 continue;
4575
4576 info = &ss->info->data.array;
4577
4578 for (n = 0; n < ss->dimen; n++)
4579 {
4580 /* If we are specifying the range the delta is already set. */
4581 if (loopspec[n] != ss)
4582 {
4583 dim = ss->dim[n];
4584
4585 /* Calculate the offset relative to the loop variable.
4586 First multiply by the stride. */
4587 tmp = loop->from[n];
4588 if (!integer_onep (info->stride[dim]))
4589 tmp = fold_build2_loc (input_location, MULT_EXPR,
4590 gfc_array_index_type,
4591 tmp, info->stride[dim]);
4592
4593 /* Then subtract this from our starting value. */
4594 tmp = fold_build2_loc (input_location, MINUS_EXPR,
4595 gfc_array_index_type,
4596 info->start[dim], tmp);
4597
4598 info->delta[dim] = gfc_evaluate_now (tmp, &loop->pre);
4599 }
4600 }
4601 }
4602
4603 for (loop = loop->nested; loop; loop = loop->next)
4604 gfc_set_delta (loop);
4605 }
4606
4607
4608 /* Calculate the size of a given array dimension from the bounds. This
4609 is simply (ubound - lbound + 1) if this expression is positive
4610 or 0 if it is negative (pick either one if it is zero). Optionally
4611 (if or_expr is present) OR the "extent is empty" (size <= 0) condition into it. */
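/* For example, lbound == 1 and ubound == 10 give an extent of 10, while
ubound < lbound gives 0 (an empty dimension). */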
4612
4613 tree
4614 gfc_conv_array_extent_dim (tree lbound, tree ubound, tree* or_expr)
4615 {
4616 tree res;
4617 tree cond;
4618
4619 /* Calculate (ubound - lbound + 1). */
4620 res = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
4621 ubound, lbound);
4622 res = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type, res,
4623 gfc_index_one_node);
4624
4625 /* Check whether the size for this dimension is negative. */
4626 cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, res,
4627 gfc_index_zero_node);
4628 res = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type, cond,
4629 gfc_index_zero_node, res);
4630
4631 /* Build OR expression. */
4632 if (or_expr)
4633 *or_expr = fold_build2_loc (input_location, TRUTH_OR_EXPR,
4634 boolean_type_node, *or_expr, cond);
4635
4636 return res;
4637 }
4638
4639
4640 /* For an array descriptor, get the total number of elements. This is just
4641 the product of the extents along from_dim to to_dim. */
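/* E.g. for a rank-2 descriptor with extents 5 and 3 this yields 15. */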
4642
4643 static tree
4644 gfc_conv_descriptor_size_1 (tree desc, int from_dim, int to_dim)
4645 {
4646 tree res;
4647 int dim;
4648
4649 res = gfc_index_one_node;
4650
4651 for (dim = from_dim; dim < to_dim; ++dim)
4652 {
4653 tree lbound;
4654 tree ubound;
4655 tree extent;
4656
4657 lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]);
4658 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]);
4659
4660 extent = gfc_conv_array_extent_dim (lbound, ubound, NULL);
4661 res = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
4662 res, extent);
4663 }
4664
4665 return res;
4666 }
4667
4668
4669 /* Full size of an array. */
4670
4671 tree
4672 gfc_conv_descriptor_size (tree desc, int rank)
4673 {
4674 return gfc_conv_descriptor_size_1 (desc, 0, rank);
4675 }
4676
4677
4678 /* Size of a coarray for all dimensions but the last. */
4679
4680 tree
4681 gfc_conv_descriptor_cosize (tree desc, int rank, int corank)
4682 {
4683 return gfc_conv_descriptor_size_1 (desc, rank, rank + corank - 1);
4684 }
4685
4686
4687 /* Fills in an array descriptor, and returns the size of the array.
4688 The size will be a simple_val, ie a variable or a constant. Also
4689 calculates the offset of the base. The pointer argument overflow,
4690 which should be of integer type, will increase in value if overflow
4691 occurs during the size calculation. Returns the size of the array.
4692 {
4693 stride = 1;
4694 offset = 0;
4695 for (n = 0; n < rank; n++)
4696 {
4697 a.lbound[n] = specified_lower_bound;
4698 offset = offset - a.lbound[n] * stride;
4699 size = 1 - lbound;
4700 a.ubound[n] = specified_upper_bound;
4701 a.stride[n] = stride;
4702 size = size >= 0 ? ubound + size : 0; //size = ubound + 1 - lbound
4703 overflow += size == 0 ? 0: (MAX/size < stride ? 1: 0);
4704 stride = stride * size;
4705 }
4706 for (n = rank; n < rank+corank; n++)
4707 (Set lcobound/ucobound as above.)
4708 element_size = sizeof (array element);
4709 if (!rank)
4710 return element_size
4711 stride = (size_t) stride;
4712 overflow += element_size == 0 ? 0: (MAX/element_size < stride ? 1: 0);
4713 stride = stride * element_size;
4714 return (stride);
4715 } */
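/* As a worked example (hypothetical bounds), allocating a(0:4, 1:3) gives
lbound/ubound/stride of 0/4/1 and 1/3/5 per dimension, an offset of
-(0*1 + 1*5) == -5, and an element count of 5 * 3 == 15. */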
4716 /*GCC ARRAYS*/
4717
4718 static tree
4719 gfc_array_init_size (tree descriptor, int rank, int corank, tree * poffset,
4720 gfc_expr ** lower, gfc_expr ** upper, stmtblock_t * pblock,
4721 stmtblock_t * descriptor_block, tree * overflow,
4722 tree expr3_elem_size, tree *nelems, gfc_expr *expr3)
4723 {
4724 tree type;
4725 tree tmp;
4726 tree size;
4727 tree offset;
4728 tree stride;
4729 tree element_size;
4730 tree or_expr;
4731 tree thencase;
4732 tree elsecase;
4733 tree cond;
4734 tree var;
4735 stmtblock_t thenblock;
4736 stmtblock_t elseblock;
4737 gfc_expr *ubound;
4738 gfc_se se;
4739 int n;
4740
4741 type = TREE_TYPE (descriptor);
4742
4743 stride = gfc_index_one_node;
4744 offset = gfc_index_zero_node;
4745
4746 /* Set the dtype. */
4747 tmp = gfc_conv_descriptor_dtype (descriptor);
4748 gfc_add_modify (descriptor_block, tmp, gfc_get_dtype (TREE_TYPE (descriptor)));
4749
4750 or_expr = boolean_false_node;
4751
4752 for (n = 0; n < rank; n++)
4753 {
4754 tree conv_lbound;
4755 tree conv_ubound;
4756
4757 /* We have 3 possibilities for determining the size of the array:
4758 lower == NULL => lbound = 1, ubound = upper[n]
4759 upper[n] = NULL => lbound = 1, ubound = lower[n]
4760 upper[n] != NULL => lbound = lower[n], ubound = upper[n] */
4761 ubound = upper[n];
4762
4763 /* Set lower bound. */
4764 gfc_init_se (&se, NULL);
4765 if (lower == NULL)
4766 se.expr = gfc_index_one_node;
4767 else
4768 {
4769 gcc_assert (lower[n]);
4770 if (ubound)
4771 {
4772 gfc_conv_expr_type (&se, lower[n], gfc_array_index_type);
4773 gfc_add_block_to_block (pblock, &se.pre);
4774 }
4775 else
4776 {
4777 se.expr = gfc_index_one_node;
4778 ubound = lower[n];
4779 }
4780 }
4781 gfc_conv_descriptor_lbound_set (descriptor_block, descriptor,
4782 gfc_rank_cst[n], se.expr);
4783 conv_lbound = se.expr;
4784
4785 /* Work out the offset for this component. */
4786 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
4787 se.expr, stride);
4788 offset = fold_build2_loc (input_location, MINUS_EXPR,
4789 gfc_array_index_type, offset, tmp);
4790
4791 /* Set upper bound. */
4792 gfc_init_se (&se, NULL);
4793 gcc_assert (ubound);
4794 gfc_conv_expr_type (&se, ubound, gfc_array_index_type);
4795 gfc_add_block_to_block (pblock, &se.pre);
4796
4797 gfc_conv_descriptor_ubound_set (descriptor_block, descriptor,
4798 gfc_rank_cst[n], se.expr);
4799 conv_ubound = se.expr;
4800
4801 /* Store the stride. */
4802 gfc_conv_descriptor_stride_set (descriptor_block, descriptor,
4803 gfc_rank_cst[n], stride);
4804
4805 /* Calculate size and check whether extent is negative. */
4806 size = gfc_conv_array_extent_dim (conv_lbound, conv_ubound, &or_expr);
4807 size = gfc_evaluate_now (size, pblock);
4808
4809 /* Check whether multiplying the stride by the number of
4810 elements in this dimension would overflow. We must also check
4811 whether the current dimension has zero size in order to avoid
4812 division by zero.
4813 */
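/* I.e. overflow is flagged when size != 0 and stride > MAX / size. */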
4814 tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
4815 gfc_array_index_type,
4816 fold_convert (gfc_array_index_type,
4817 TYPE_MAX_VALUE (gfc_array_index_type)),
4818 size);
4819 cond = gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
4820 boolean_type_node, tmp, stride));
4821 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
4822 integer_one_node, integer_zero_node);
4823 cond = gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
4824 boolean_type_node, size,
4825 gfc_index_zero_node));
4826 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
4827 integer_zero_node, tmp);
4828 tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
4829 *overflow, tmp);
4830 *overflow = gfc_evaluate_now (tmp, pblock);
4831
4832 /* Multiply the stride by the number of elements in this dimension. */
4833 stride = fold_build2_loc (input_location, MULT_EXPR,
4834 gfc_array_index_type, stride, size);
4835 stride = gfc_evaluate_now (stride, pblock);
4836 }
4837
4838 for (n = rank; n < rank + corank; n++)
4839 {
4840 ubound = upper[n];
4841
4842 /* Set lower bound. */
4843 gfc_init_se (&se, NULL);
4844 if (lower == NULL || lower[n] == NULL)
4845 {
4846 gcc_assert (n == rank + corank - 1);
4847 se.expr = gfc_index_one_node;
4848 }
4849 else
4850 {
4851 if (ubound || n == rank + corank - 1)
4852 {
4853 gfc_conv_expr_type (&se, lower[n], gfc_array_index_type);
4854 gfc_add_block_to_block (pblock, &se.pre);
4855 }
4856 else
4857 {
4858 se.expr = gfc_index_one_node;
4859 ubound = lower[n];
4860 }
4861 }
4862 gfc_conv_descriptor_lbound_set (descriptor_block, descriptor,
4863 gfc_rank_cst[n], se.expr);
4864
4865 if (n < rank + corank - 1)
4866 {
4867 gfc_init_se (&se, NULL);
4868 gcc_assert (ubound);
4869 gfc_conv_expr_type (&se, ubound, gfc_array_index_type);
4870 gfc_add_block_to_block (pblock, &se.pre);
4871 gfc_conv_descriptor_ubound_set (descriptor_block, descriptor,
4872 gfc_rank_cst[n], se.expr);
4873 }
4874 }
4875
4876 /* The stride is the number of elements in the array, so multiply by the
4877 size of an element to get the total size. Obviously, if there is a
4878 SOURCE expression (expr3) we must use its element size. */
4879 if (expr3_elem_size != NULL_TREE)
4880 tmp = expr3_elem_size;
4881 else if (expr3 != NULL)
4882 {
4883 if (expr3->ts.type == BT_CLASS)
4884 {
4885 gfc_se se_sz;
4886 gfc_expr *sz = gfc_copy_expr (expr3);
4887 gfc_add_vptr_component (sz);
4888 gfc_add_size_component (sz);
4889 gfc_init_se (&se_sz, NULL);
4890 gfc_conv_expr (&se_sz, sz);
4891 gfc_free_expr (sz);
4892 tmp = se_sz.expr;
4893 }
4894 else
4895 {
4896 tmp = gfc_typenode_for_spec (&expr3->ts);
4897 tmp = TYPE_SIZE_UNIT (tmp);
4898 }
4899 }
4900 else
4901 tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
4902
4903 /* Convert to size_t. */
4904 element_size = fold_convert (size_type_node, tmp);
4905
4906 if (rank == 0)
4907 return element_size;
4908
4909 *nelems = gfc_evaluate_now (stride, pblock);
4910 stride = fold_convert (size_type_node, stride);
4911
4912 /* First check for overflow. Since an array of type character can
4913 have zero element_size, we must check for that before
4914 dividing. */
4915 tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
4916 size_type_node,
4917 TYPE_MAX_VALUE (size_type_node), element_size);
4918 cond = gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
4919 boolean_type_node, tmp, stride));
4920 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
4921 integer_one_node, integer_zero_node);
4922 cond = gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
4923 boolean_type_node, element_size,
4924 build_int_cst (size_type_node, 0)));
4925 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
4926 integer_zero_node, tmp);
4927 tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
4928 *overflow, tmp);
4929 *overflow = gfc_evaluate_now (tmp, pblock);
4930
4931 size = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
4932 stride, element_size);
4933
4934 if (poffset != NULL)
4935 {
4936 offset = gfc_evaluate_now (offset, pblock);
4937 *poffset = offset;
4938 }
4939
4940 if (integer_zerop (or_expr))
4941 return size;
4942 if (integer_onep (or_expr))
4943 return build_int_cst (size_type_node, 0);
4944
4945 var = gfc_create_var (TREE_TYPE (size), "size");
4946 gfc_start_block (&thenblock);
4947 gfc_add_modify (&thenblock, var, build_int_cst (size_type_node, 0));
4948 thencase = gfc_finish_block (&thenblock);
4949
4950 gfc_start_block (&elseblock);
4951 gfc_add_modify (&elseblock, var, size);
4952 elsecase = gfc_finish_block (&elseblock);
4953
4954 tmp = gfc_evaluate_now (or_expr, pblock);
4955 tmp = build3_v (COND_EXPR, tmp, thencase, elsecase);
4956 gfc_add_expr_to_block (pblock, tmp);
4957
4958 return var;
4959 }
4960
4961
4962 /* Initializes the descriptor and generates a call to _gfor_allocate. Does
4963 the work for an ALLOCATE statement. */
4964 /*GCC ARRAYS*/
4965
4966 bool
4967 gfc_array_allocate (gfc_se * se, gfc_expr * expr, tree status, tree errmsg,
4968 tree errlen, tree label_finish, tree expr3_elem_size,
4969 tree *nelems, gfc_expr *expr3)
4970 {
4971 tree tmp;
4972 tree pointer;
4973 tree offset = NULL_TREE;
4974 tree token = NULL_TREE;
4975 tree size;
4976 tree msg;
4977 tree error = NULL_TREE;
4978 tree overflow; /* Boolean storing whether size calculation overflows. */
4979 tree var_overflow = NULL_TREE;
4980 tree cond;
4981 tree set_descriptor;
4982 stmtblock_t set_descriptor_block;
4983 stmtblock_t elseblock;
4984 gfc_expr **lower;
4985 gfc_expr **upper;
4986 gfc_ref *ref, *prev_ref = NULL;
4987 bool allocatable, coarray, dimension;
4988
4989 ref = expr->ref;
4990
4991 /* Find the last reference in the chain. */
4992 while (ref && ref->next != NULL)
4993 {
4994 gcc_assert (ref->type != REF_ARRAY || ref->u.ar.type == AR_ELEMENT
4995 || (ref->u.ar.dimen == 0 && ref->u.ar.codimen > 0));
4996 prev_ref = ref;
4997 ref = ref->next;
4998 }
4999
5000 if (ref == NULL || ref->type != REF_ARRAY)
5001 return false;
5002
5003 if (!prev_ref)
5004 {
5005 allocatable = expr->symtree->n.sym->attr.allocatable;
5006 coarray = expr->symtree->n.sym->attr.codimension;
5007 dimension = expr->symtree->n.sym->attr.dimension;
5008 }
5009 else
5010 {
5011 allocatable = prev_ref->u.c.component->attr.allocatable;
5012 coarray = prev_ref->u.c.component->attr.codimension;
5013 dimension = prev_ref->u.c.component->attr.dimension;
5014 }
5015
5016 if (!dimension)
5017 gcc_assert (coarray);
5018
5019 /* Figure out the size of the array. */
5020 switch (ref->u.ar.type)
5021 {
5022 case AR_ELEMENT:
5023 if (!coarray)
5024 {
5025 lower = NULL;
5026 upper = ref->u.ar.start;
5027 break;
5028 }
5029 /* Fall through. */
5030
5031 case AR_SECTION:
5032 lower = ref->u.ar.start;
5033 upper = ref->u.ar.end;
5034 break;
5035
5036 case AR_FULL:
5037 gcc_assert (ref->u.ar.as->type == AS_EXPLICIT);
5038
5039 lower = ref->u.ar.as->lower;
5040 upper = ref->u.ar.as->upper;
5041 break;
5042
5043 default:
5044 gcc_unreachable ();
5045 break;
5046 }
5047
5048 overflow = integer_zero_node;
5049
5050 gfc_init_block (&set_descriptor_block);
5051 size = gfc_array_init_size (se->expr, ref->u.ar.as->rank,
5052 ref->u.ar.as->corank, &offset, lower, upper,
5053 &se->pre, &set_descriptor_block, &overflow,
5054 expr3_elem_size, nelems, expr3);
5055
5056 if (dimension)
5057 {
5058
5059 var_overflow = gfc_create_var (integer_type_node, "overflow");
5060 gfc_add_modify (&se->pre, var_overflow, overflow);
5061
5062 /* Generate the block of code handling overflow. */
5063 msg = gfc_build_addr_expr (pchar_type_node,
5064 gfc_build_localized_cstring_const
5065 ("Integer overflow when calculating the amount of "
5066 "memory to allocate"));
5067 error = build_call_expr_loc (input_location, gfor_fndecl_runtime_error,
5068 1, msg);
5069 }
5070
5071 if (status != NULL_TREE)
5072 {
5073 tree status_type = TREE_TYPE (status);
5074 stmtblock_t set_status_block;
5075
5076 gfc_start_block (&set_status_block);
5077 gfc_add_modify (&set_status_block, status,
5078 build_int_cst (status_type, LIBERROR_ALLOCATION));
5079 error = gfc_finish_block (&set_status_block);
5080 }
5081
5082 gfc_start_block (&elseblock);
5083
5084 /* Allocate memory to store the data. */
5085 if (POINTER_TYPE_P (TREE_TYPE (se->expr)))
5086 se->expr = build_fold_indirect_ref_loc (input_location, se->expr);
5087
5088 pointer = gfc_conv_descriptor_data_get (se->expr);
5089 STRIP_NOPS (pointer);
5090
5091 if (coarray && gfc_option.coarray == GFC_FCOARRAY_LIB)
5092 token = gfc_build_addr_expr (NULL_TREE,
5093 gfc_conv_descriptor_token (se->expr));
5094
5095 /* The allocatable variant takes the old pointer as first argument. */
5096 if (allocatable)
5097 gfc_allocate_allocatable (&elseblock, pointer, size, token,
5098 status, errmsg, errlen, label_finish, expr);
5099 else
5100 gfc_allocate_using_malloc (&elseblock, pointer, size, status);
5101
5102 if (dimension)
5103 {
5104 cond = gfc_unlikely (fold_build2_loc (input_location, NE_EXPR,
5105 boolean_type_node, var_overflow, integer_zero_node));
5106 tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond,
5107 error, gfc_finish_block (&elseblock));
5108 }
5109 else
5110 tmp = gfc_finish_block (&elseblock);
5111
5112 gfc_add_expr_to_block (&se->pre, tmp);
5113
5114 if (expr->ts.type == BT_CLASS)
5115 {
5116 tmp = build_int_cst (unsigned_char_type_node, 0);
5117 /* With class objects, it is best to play safe and null the
5118 memory because we cannot know if dynamic types have allocatable
5119 components or not. */
5120 tmp = build_call_expr_loc (input_location,
5121 builtin_decl_explicit (BUILT_IN_MEMSET),
5122 3, pointer, tmp, size);
5123 gfc_add_expr_to_block (&se->pre, tmp);
5124 }
5125
5126 /* Update the array descriptors. */
5127 if (dimension)
5128 gfc_conv_descriptor_offset_set (&set_descriptor_block, se->expr, offset);
5129
5130 set_descriptor = gfc_finish_block (&set_descriptor_block);
5131 if (status != NULL_TREE)
5132 {
5133 cond = fold_build2_loc (input_location, EQ_EXPR,
5134 boolean_type_node, status,
5135 build_int_cst (TREE_TYPE (status), 0));
5136 gfc_add_expr_to_block (&se->pre,
5137 fold_build3_loc (input_location, COND_EXPR, void_type_node,
5138 gfc_likely (cond), set_descriptor,
5139 build_empty_stmt (input_location)));
5140 }
5141 else
5142 gfc_add_expr_to_block (&se->pre, set_descriptor);
5143
5144 if ((expr->ts.type == BT_DERIVED)
5145 && expr->ts.u.derived->attr.alloc_comp)
5146 {
5147 tmp = gfc_nullify_alloc_comp (expr->ts.u.derived, se->expr,
5148 ref->u.ar.as->rank);
5149 gfc_add_expr_to_block (&se->pre, tmp);
5150 }
5151
5152 return true;
5153 }
5154
5155
5156 /* Deallocate an array variable. Also used when an allocated variable goes
5157 out of scope. */
5158 /*GCC ARRAYS*/
5159
5160 tree
5161 gfc_array_deallocate (tree descriptor, tree pstat, tree errmsg, tree errlen,
5162 tree label_finish, gfc_expr* expr)
5163 {
5164 tree var;
5165 tree tmp;
5166 stmtblock_t block;
5167 bool coarray = gfc_is_coarray (expr);
5168
5169 gfc_start_block (&block);
5170
5171 /* Get a pointer to the data. */
5172 var = gfc_conv_descriptor_data_get (descriptor);
5173 STRIP_NOPS (var);
5174
5175 /* Parameter is the address of the data component. */
5176 tmp = gfc_deallocate_with_status (coarray ? descriptor : var, pstat, errmsg,
5177 errlen, label_finish, false, expr, coarray);
5178 gfc_add_expr_to_block (&block, tmp);
5179
5180 /* Zero the data pointer; only for coarrays can an error occur, and then
5181 the allocation status may not be changed. */
5182 tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
5183 var, build_int_cst (TREE_TYPE (var), 0));
5184 if (pstat != NULL_TREE && coarray && gfc_option.coarray == GFC_FCOARRAY_LIB)
5185 {
5186 tree cond;
5187 tree stat = build_fold_indirect_ref_loc (input_location, pstat);
5188
5189 cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
5190 stat, build_int_cst (TREE_TYPE (stat), 0));
5191 tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node,
5192 cond, tmp, build_empty_stmt (input_location));
5193 }
5194
5195 gfc_add_expr_to_block (&block, tmp);
5196
5197 return gfc_finish_block (&block);
5198 }
5199
5200
5201 /* Create an array constructor from an initialization expression.
5202 We assume the frontend already did any expansions and conversions. */
5203
5204 tree
5205 gfc_conv_array_initializer (tree type, gfc_expr * expr)
5206 {
5207 gfc_constructor *c;
5208 tree tmp;
5209 gfc_se se;
5210 HOST_WIDE_INT hi;
5211 unsigned HOST_WIDE_INT lo;
5212 tree index, range;
5213 VEC(constructor_elt,gc) *v = NULL;
5214
5215 if (expr->expr_type == EXPR_VARIABLE
5216 && expr->symtree->n.sym->attr.flavor == FL_PARAMETER
5217 && expr->symtree->n.sym->value)
5218 expr = expr->symtree->n.sym->value;
5219
5220 switch (expr->expr_type)
5221 {
5222 case EXPR_CONSTANT:
5223 case EXPR_STRUCTURE:
5224 /* A single scalar or derived type value. Create an array with all
5225 elements equal to that value. */
5226 gfc_init_se (&se, NULL);
5227
5228 if (expr->expr_type == EXPR_CONSTANT)
5229 gfc_conv_constant (&se, expr);
5230 else
5231 gfc_conv_structure (&se, expr, 1);
5232
5233 tmp = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
5234 gcc_assert (tmp && INTEGER_CST_P (tmp));
5235 hi = TREE_INT_CST_HIGH (tmp);
5236 lo = TREE_INT_CST_LOW (tmp);
5237 lo++;
5238 if (lo == 0)
5239 hi++;
5240 /* This will probably eat buckets of memory for large arrays. */
5241 while (hi != 0 || lo != 0)
5242 {
5243 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, se.expr);
5244 if (lo == 0)
5245 hi--;
5246 lo--;
5247 }
5248 break;
5249
5250 case EXPR_ARRAY:
5251 /* Create a vector of all the elements. */
5252 for (c = gfc_constructor_first (expr->value.constructor);
5253 c; c = gfc_constructor_next (c))
5254 {
5255 if (c->iterator)
5256 {
5257 /* Problems occur when we get something like
5258 integer :: a(lots) = (/(i, i=1, lots)/) */
5259 gfc_fatal_error ("The number of elements in the array constructor "
5260 "at %L requires an increase of the allowed %d "
5261 "upper limit. See -fmax-array-constructor "
5262 "option", &expr->where,
5263 gfc_option.flag_max_array_constructor);
5264 return NULL_TREE;
5265 }
5266 if (mpz_cmp_si (c->offset, 0) != 0)
5267 index = gfc_conv_mpz_to_tree (c->offset, gfc_index_integer_kind);
5268 else
5269 index = NULL_TREE;
5270
5271 if (mpz_cmp_si (c->repeat, 1) > 0)
5272 {
5273 tree tmp1, tmp2;
5274 mpz_t maxval;
5275
5276 mpz_init (maxval);
5277 mpz_add (maxval, c->offset, c->repeat);
5278 mpz_sub_ui (maxval, maxval, 1);
5279 tmp2 = gfc_conv_mpz_to_tree (maxval, gfc_index_integer_kind);
5280 if (mpz_cmp_si (c->offset, 0) != 0)
5281 {
5282 mpz_add_ui (maxval, c->offset, 1);
5283 tmp1 = gfc_conv_mpz_to_tree (maxval, gfc_index_integer_kind);
5284 }
5285 else
5286 tmp1 = gfc_conv_mpz_to_tree (c->offset, gfc_index_integer_kind);
5287
5288 range = fold_build2 (RANGE_EXPR, gfc_array_index_type, tmp1, tmp2);
5289 mpz_clear (maxval);
5290 }
5291 else
5292 range = NULL;
5293
5294 gfc_init_se (&se, NULL);
5295 switch (c->expr->expr_type)
5296 {
5297 case EXPR_CONSTANT:
5298 gfc_conv_constant (&se, c->expr);
5299 break;
5300
5301 case EXPR_STRUCTURE:
5302 gfc_conv_structure (&se, c->expr, 1);
5303 break;
5304
5305 default:
5306 /* Catch those occasional beasts that do not simplify
5307 for one reason or another, assuming that if they are
5308 standard-defying the frontend will catch them. */
5309 gfc_conv_expr (&se, c->expr);
5310 break;
5311 }
5312
5313 if (range == NULL_TREE)
5314 CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
5315 else
5316 {
5317 if (index != NULL_TREE)
5318 CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
5319 CONSTRUCTOR_APPEND_ELT (v, range, se.expr);
5320 }
5321 }
5322 break;
5323
5324 case EXPR_NULL:
5325 return gfc_build_null_descriptor (type);
5326
5327 default:
5328 gcc_unreachable ();
5329 }
5330
5331 /* Create a constructor from the list of elements. */
5332 tmp = build_constructor (type, v);
5333 TREE_CONSTANT (tmp) = 1;
5334 return tmp;
5335 }
5336
5337
5338 /* Generate code to evaluate non-constant coarray cobounds. */
5339
5340 void
5341 gfc_trans_array_cobounds (tree type, stmtblock_t * pblock,
5342 const gfc_symbol *sym)
5343 {
5344 int dim;
5345 tree ubound;
5346 tree lbound;
5347 gfc_se se;
5348 gfc_array_spec *as;
5349
5350 as = sym->as;
5351
5352 for (dim = as->rank; dim < as->rank + as->corank; dim++)
5353 {
5354 /* Evaluate non-constant array bound expressions. */
5355 lbound = GFC_TYPE_ARRAY_LBOUND (type, dim);
5356 if (as->lower[dim] && !INTEGER_CST_P (lbound))
5357 {
5358 gfc_init_se (&se, NULL);
5359 gfc_conv_expr_type (&se, as->lower[dim], gfc_array_index_type);
5360 gfc_add_block_to_block (pblock, &se.pre);
5361 gfc_add_modify (pblock, lbound, se.expr);
5362 }
5363 ubound = GFC_TYPE_ARRAY_UBOUND (type, dim);
5364 if (as->upper[dim] && !INTEGER_CST_P (ubound))
5365 {
5366 gfc_init_se (&se, NULL);
5367 gfc_conv_expr_type (&se, as->upper[dim], gfc_array_index_type);
5368 gfc_add_block_to_block (pblock, &se.pre);
5369 gfc_add_modify (pblock, ubound, se.expr);
5370 }
5371 }
5372 }
5373
5374
5375 /* Generate code to evaluate non-constant array bounds. Sets *poffset and
5376 returns the size (in elements) of the array. */
5377
5378 static tree
5379 gfc_trans_array_bounds (tree type, gfc_symbol * sym, tree * poffset,
5380 stmtblock_t * pblock)
5381 {
5382 gfc_array_spec *as;
5383 tree size;
5384 tree stride;
5385 tree offset;
5386 tree ubound;
5387 tree lbound;
5388 tree tmp;
5389 gfc_se se;
5390
5391 int dim;
5392
5393 as = sym->as;
5394
5395 size = gfc_index_one_node;
5396 offset = gfc_index_zero_node;
5397 for (dim = 0; dim < as->rank; dim++)
5398 {
5399 /* Evaluate non-constant array bound expressions. */
5400 lbound = GFC_TYPE_ARRAY_LBOUND (type, dim);
5401 if (as->lower[dim] && !INTEGER_CST_P (lbound))
5402 {
5403 gfc_init_se (&se, NULL);
5404 gfc_conv_expr_type (&se, as->lower[dim], gfc_array_index_type);
5405 gfc_add_block_to_block (pblock, &se.pre);
5406 gfc_add_modify (pblock, lbound, se.expr);
5407 }
5408 ubound = GFC_TYPE_ARRAY_UBOUND (type, dim);
5409 if (as->upper[dim] && !INTEGER_CST_P (ubound))
5410 {
5411 gfc_init_se (&se, NULL);
5412 gfc_conv_expr_type (&se, as->upper[dim], gfc_array_index_type);
5413 gfc_add_block_to_block (pblock, &se.pre);
5414 gfc_add_modify (pblock, ubound, se.expr);
5415 }
5416 /* The offset of this dimension. offset = offset - lbound * stride. */
5417 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
5418 lbound, size);
5419 offset = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
5420 offset, tmp);
5421
5422 /* The size of this dimension, and the stride of the next. */
5423 if (dim + 1 < as->rank)
5424 stride = GFC_TYPE_ARRAY_STRIDE (type, dim + 1);
5425 else
5426 stride = GFC_TYPE_ARRAY_SIZE (type);
5427
5428 if (ubound != NULL_TREE && !(stride && INTEGER_CST_P (stride)))
5429 {
5430 /* Calculate stride = size * (ubound + 1 - lbound). */
5431 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5432 gfc_array_index_type,
5433 gfc_index_one_node, lbound);
5434 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5435 gfc_array_index_type, ubound, tmp);
5436 tmp = fold_build2_loc (input_location, MULT_EXPR,
5437 gfc_array_index_type, size, tmp);
5438 if (stride)
5439 gfc_add_modify (pblock, stride, tmp);
5440 else
5441 stride = gfc_evaluate_now (tmp, pblock);
5442
5443 /* Make sure that negative size arrays are translated
5444 to being zero size. */
5445 tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
5446 stride, gfc_index_zero_node);
5447 tmp = fold_build3_loc (input_location, COND_EXPR,
5448 gfc_array_index_type, tmp,
5449 stride, gfc_index_zero_node);
5450 gfc_add_modify (pblock, stride, tmp);
5451 }
5452
5453 size = stride;
5454 }
5455
5456 gfc_trans_array_cobounds (type, pblock, sym);
5457 gfc_trans_vla_type_sizes (sym, pblock);
5458
5459 *poffset = offset;
5460 return size;
5461 }
5462
5463
5464 /* Generate code to initialize/allocate an array variable. */
5465
5466 void
5467 gfc_trans_auto_array_allocation (tree decl, gfc_symbol * sym,
5468 gfc_wrapped_block * block)
5469 {
5470 stmtblock_t init;
5471 tree type;
5472 tree tmp = NULL_TREE;
5473 tree size;
5474 tree offset;
5475 tree space;
5476 tree inittree;
5477 bool onstack;
5478
5479 gcc_assert (!(sym->attr.pointer || sym->attr.allocatable));
5480
5481 /* Do nothing for USEd variables. */
5482 if (sym->attr.use_assoc)
5483 return;
5484
5485 type = TREE_TYPE (decl);
5486 gcc_assert (GFC_ARRAY_TYPE_P (type));
5487 onstack = TREE_CODE (type) != POINTER_TYPE;
5488
5489 gfc_init_block (&init);
5490
5491 /* Evaluate character string length. */
5492 if (sym->ts.type == BT_CHARACTER
5493 && onstack && !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
5494 {
5495 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5496
5497 gfc_trans_vla_type_sizes (sym, &init);
5498
5499 /* Emit a DECL_EXPR for this variable, which will cause the
5500 gimplifier to allocate storage, and all that good stuff. */
5501 tmp = fold_build1_loc (input_location, DECL_EXPR, TREE_TYPE (decl), decl);
5502 gfc_add_expr_to_block (&init, tmp);
5503 }
5504
5505 if (onstack)
5506 {
5507 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
5508 return;
5509 }
5510
5511 type = TREE_TYPE (type);
5512
5513 gcc_assert (!sym->attr.use_assoc);
5514 gcc_assert (!TREE_STATIC (decl));
5515 gcc_assert (!sym->module);
5516
5517 if (sym->ts.type == BT_CHARACTER
5518 && !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
5519 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5520
5521 size = gfc_trans_array_bounds (type, sym, &offset, &init);
5522
5523 /* Don't actually allocate space for Cray Pointees. */
5524 if (sym->attr.cray_pointee)
5525 {
5526 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5527 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5528
5529 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
5530 return;
5531 }
5532
5533 if (gfc_option.flag_stack_arrays)
5534 {
5535 gcc_assert (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE);
5536 space = build_decl (sym->declared_at.lb->location,
5537 VAR_DECL, create_tmp_var_name ("A"),
5538 TREE_TYPE (TREE_TYPE (decl)));
5539 gfc_trans_vla_type_sizes (sym, &init);
5540 }
5541 else
5542 {
5543 /* The size is the number of elements in the array, so multiply by the
5544 size of an element to get the total size. */
5545 tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
5546 size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
5547 size, fold_convert (gfc_array_index_type, tmp));
5548
5549 /* Allocate memory to hold the data. */
5550 tmp = gfc_call_malloc (&init, TREE_TYPE (decl), size);
5551 gfc_add_modify (&init, decl, tmp);
5552
5553 /* Free the temporary. */
5554 tmp = gfc_call_free (convert (pvoid_type_node, decl));
5555 space = NULL_TREE;
5556 }
5557
5558 /* Set offset of the array. */
5559 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5560 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5561
5562 /* Automatic arrays should not have initializers. */
5563 gcc_assert (!sym->value);
5564
5565 inittree = gfc_finish_block (&init);
5566
5567 if (space)
5568 {
5569 tree addr;
5570 pushdecl (space);
5571
5572 /* Don't create a new scope; emit the DECL_EXPR in exactly the scope
5573 where space itself is located. */
5574 gfc_init_block (&init);
5575 tmp = fold_build1_loc (input_location, DECL_EXPR,
5576 TREE_TYPE (space), space);
5577 gfc_add_expr_to_block (&init, tmp);
5578 addr = fold_build1_loc (sym->declared_at.lb->location,
5579 ADDR_EXPR, TREE_TYPE (decl), space);
5580 gfc_add_modify (&init, decl, addr);
5581 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
5582 tmp = NULL_TREE;
5583 }
5584 gfc_add_init_cleanup (block, inittree, tmp);
5585 }
5586
5587
5588 /* Generate entry and exit code for g77 calling convention arrays. */
5589
5590 void
5591 gfc_trans_g77_array (gfc_symbol * sym, gfc_wrapped_block * block)
5592 {
5593 tree parm;
5594 tree type;
5595 locus loc;
5596 tree offset;
5597 tree tmp;
5598 tree stmt;
5599 stmtblock_t init;
5600
5601 gfc_save_backend_locus (&loc);
5602 gfc_set_backend_locus (&sym->declared_at);
5603
5604 /* Descriptor type. */
5605 parm = sym->backend_decl;
5606 type = TREE_TYPE (parm);
5607 gcc_assert (GFC_ARRAY_TYPE_P (type));
5608
5609 gfc_start_block (&init);
5610
5611 if (sym->ts.type == BT_CHARACTER
5612 && TREE_CODE (sym->ts.u.cl->backend_decl) == VAR_DECL)
5613 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5614
5615 /* Evaluate the bounds of the array. */
5616 gfc_trans_array_bounds (type, sym, &offset, &init);
5617
5618 /* Set the offset. */
5619 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5620 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5621
5622 /* Set the pointer itself if we aren't using the parameter directly. */
5623 if (TREE_CODE (parm) != PARM_DECL)
5624 {
5625 tmp = convert (TREE_TYPE (parm), GFC_DECL_SAVED_DESCRIPTOR (parm));
5626 gfc_add_modify (&init, parm, tmp);
5627 }
5628 stmt = gfc_finish_block (&init);
5629
5630 gfc_restore_backend_locus (&loc);
5631
5632 /* Add the initialization code to the start of the function. */
5633
5634 if (sym->attr.optional || sym->attr.not_always_present)
5635 {
5636 tmp = gfc_conv_expr_present (sym);
5637 stmt = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt (input_location));
5638 }
5639
5640 gfc_add_init_cleanup (block, stmt, NULL_TREE);
5641 }
5642
5643
5644 /* Modify the descriptor of an array parameter so that it has the
5645 correct lower bound. Also move the upper bound accordingly.
5646 If the array is not packed, it will be copied into a temporary.
5647 For each dimension we set the new lower and upper bounds. Then we copy the
5648 stride and calculate the offset for this dimension. We also work out
5649 what the stride of a packed array would be, and see if the two match.
5650 If the array needs repacking, we set the stride to the values we just
5651 calculated, recalculate the offset and copy the array data.
5652 Code is also added to copy the data back at the end of the function.
5653 */
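/* For example, an assumed-shape dummy

     subroutine s (a)
       real :: a(:)

   called as "call s (b(1:10:2))" receives a descriptor whose first stride
   is not 1.  When repacking is enabled (-frepack-arrays), the data are
   copied into a contiguous temporary by the library pack routine
   (gfor_fndecl_in_pack) on entry and, unless the dummy is INTENT(IN),
   copied back by the unpack routine (gfor_fndecl_in_unpack) on exit.  */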
5654
5655 void
5656 gfc_trans_dummy_array_bias (gfc_symbol * sym, tree tmpdesc,
5657 gfc_wrapped_block * block)
5658 {
5659 tree size;
5660 tree type;
5661 tree offset;
5662 locus loc;
5663 stmtblock_t init;
5664 tree stmtInit, stmtCleanup;
5665 tree lbound;
5666 tree ubound;
5667 tree dubound;
5668 tree dlbound;
5669 tree dumdesc;
5670 tree tmp;
5671 tree stride, stride2;
5672 tree stmt_packed;
5673 tree stmt_unpacked;
5674 tree partial;
5675 gfc_se se;
5676 int n;
5677 int checkparm;
5678 int no_repack;
5679 bool optional_arg;
5680
5681 /* Do nothing for pointer and allocatable arrays. */
5682 if (sym->attr.pointer || sym->attr.allocatable)
5683 return;
5684
5685 if (sym->attr.dummy && gfc_is_nodesc_array (sym))
5686 {
5687 gfc_trans_g77_array (sym, block);
5688 return;
5689 }
5690
5691 gfc_save_backend_locus (&loc);
5692 gfc_set_backend_locus (&sym->declared_at);
5693
5694 /* Descriptor type. */
5695 type = TREE_TYPE (tmpdesc);
5696 gcc_assert (GFC_ARRAY_TYPE_P (type));
5697 dumdesc = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
5698 dumdesc = build_fold_indirect_ref_loc (input_location, dumdesc);
5699 gfc_start_block (&init);
5700
5701 if (sym->ts.type == BT_CHARACTER
5702 && TREE_CODE (sym->ts.u.cl->backend_decl) == VAR_DECL)
5703 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5704
5705 checkparm = (sym->as->type == AS_EXPLICIT
5706 && (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS));
5707
5708 no_repack = !(GFC_DECL_PACKED_ARRAY (tmpdesc)
5709 || GFC_DECL_PARTIAL_PACKED_ARRAY (tmpdesc));
5710
5711 if (GFC_DECL_PARTIAL_PACKED_ARRAY (tmpdesc))
5712 {
5713 /* For non-constant shape arrays we only check if the first dimension
5714 is contiguous. Repacking higher dimensions wouldn't gain us
5715 anything as we still don't know the array stride. */
5716 partial = gfc_create_var (boolean_type_node, "partial");
5717 TREE_USED (partial) = 1;
5718 tmp = gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[0]);
5719 tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, tmp,
5720 gfc_index_one_node);
5721 gfc_add_modify (&init, partial, tmp);
5722 }
5723 else
5724 partial = NULL_TREE;
5725
5726 /* The naming of stmt_unpacked and stmt_packed may be counter-intuitive
5727 here; however, I think it does the right thing. */
5728 if (no_repack)
5729 {
5730 /* Set the first stride. */
5731 stride = gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[0]);
5732 stride = gfc_evaluate_now (stride, &init);
5733
5734 tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
5735 stride, gfc_index_zero_node);
5736 tmp = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type,
5737 tmp, gfc_index_one_node, stride);
5738 stride = GFC_TYPE_ARRAY_STRIDE (type, 0);
5739 gfc_add_modify (&init, stride, tmp);
5740
5741 /* Allow the user to disable array repacking. */
5742 stmt_unpacked = NULL_TREE;
5743 }
5744 else
5745 {
5746 gcc_assert (integer_onep (GFC_TYPE_ARRAY_STRIDE (type, 0)));
5747 /* A library call to repack the array if necessary. */
5748 tmp = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
5749 stmt_unpacked = build_call_expr_loc (input_location,
5750 gfor_fndecl_in_pack, 1, tmp);
5751
5752 stride = gfc_index_one_node;
5753
5754 if (gfc_option.warn_array_temp)
5755 gfc_warning ("Creating array temporary at %L", &loc);
5756 }
5757
5758 /* This is for the case where the array data is used directly without
5759 calling the repack function. */
5760 if (no_repack || partial != NULL_TREE)
5761 stmt_packed = gfc_conv_descriptor_data_get (dumdesc);
5762 else
5763 stmt_packed = NULL_TREE;
5764
5765 /* Assign the data pointer. */
5766 if (stmt_packed != NULL_TREE && stmt_unpacked != NULL_TREE)
5767 {
5768 /* Don't repack unknown shape arrays when the first stride is 1. */
5769 tmp = fold_build3_loc (input_location, COND_EXPR, TREE_TYPE (stmt_packed),
5770 partial, stmt_packed, stmt_unpacked);
5771 }
5772 else
5773 tmp = stmt_packed != NULL_TREE ? stmt_packed : stmt_unpacked;
5774 gfc_add_modify (&init, tmpdesc, fold_convert (type, tmp));
5775
5776 offset = gfc_index_zero_node;
5777 size = gfc_index_one_node;
5778
5779 /* Evaluate the bounds of the array. */
5780 for (n = 0; n < sym->as->rank; n++)
5781 {
5782 if (checkparm || !sym->as->upper[n])
5783 {
5784 /* Get the bounds of the actual parameter. */
5785 dubound = gfc_conv_descriptor_ubound_get (dumdesc, gfc_rank_cst[n]);
5786 dlbound = gfc_conv_descriptor_lbound_get (dumdesc, gfc_rank_cst[n]);
5787 }
5788 else
5789 {
5790 dubound = NULL_TREE;
5791 dlbound = NULL_TREE;
5792 }
5793
5794 lbound = GFC_TYPE_ARRAY_LBOUND (type, n);
5795 if (!INTEGER_CST_P (lbound))
5796 {
5797 gfc_init_se (&se, NULL);
5798 gfc_conv_expr_type (&se, sym->as->lower[n],
5799 gfc_array_index_type);
5800 gfc_add_block_to_block (&init, &se.pre);
5801 gfc_add_modify (&init, lbound, se.expr);
5802 }
5803
5804 ubound = GFC_TYPE_ARRAY_UBOUND (type, n);
5805 /* Set the desired upper bound. */
5806 if (sym->as->upper[n])
5807 {
5808 /* We know what we want the upper bound to be. */
5809 if (!INTEGER_CST_P (ubound))
5810 {
5811 gfc_init_se (&se, NULL);
5812 gfc_conv_expr_type (&se, sym->as->upper[n],
5813 gfc_array_index_type);
5814 gfc_add_block_to_block (&init, &se.pre);
5815 gfc_add_modify (&init, ubound, se.expr);
5816 }
5817
5818 /* Check the sizes match. */
5819 if (checkparm)
5820 {
5821 /* Check (ubound(a) - lbound(a) == ubound(b) - lbound(b)). */
5822 char * msg;
5823 tree temp;
5824
5825 temp = fold_build2_loc (input_location, MINUS_EXPR,
5826 gfc_array_index_type, ubound, lbound);
5827 temp = fold_build2_loc (input_location, PLUS_EXPR,
5828 gfc_array_index_type,
5829 gfc_index_one_node, temp);
5830 stride2 = fold_build2_loc (input_location, MINUS_EXPR,
5831 gfc_array_index_type, dubound,
5832 dlbound);
5833 stride2 = fold_build2_loc (input_location, PLUS_EXPR,
5834 gfc_array_index_type,
5835 gfc_index_one_node, stride2);
5836 tmp = fold_build2_loc (input_location, NE_EXPR,
5837 gfc_array_index_type, temp, stride2);
5838 asprintf (&msg, "Dimension %d of array '%s' has extent "
5839 "%%ld instead of %%ld", n+1, sym->name);
5840
5841 gfc_trans_runtime_check (true, false, tmp, &init, &loc, msg,
5842 fold_convert (long_integer_type_node, temp),
5843 fold_convert (long_integer_type_node, stride2));
5844
5845 free (msg);
5846 }
5847 }
5848 else
5849 {
5850 /* For assumed shape arrays move the upper bound by the same amount
5851 as the lower bound. */
5852 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5853 gfc_array_index_type, dubound, dlbound);
5854 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5855 gfc_array_index_type, tmp, lbound);
5856 gfc_add_modify (&init, ubound, tmp);
5857 }
5858 /* The offset of this dimension. offset = offset - lbound * stride. */
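/* As a rough illustration: for a rank-2 array with lower bounds (l1, l2)
   and strides (s1, s2) this accumulates offset = -(l1*s1 + l2*s2), so that
   element (i, j) is later addressed as data[offset + i*s1 + j*s2].  */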
5859 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
5860 lbound, stride);
5861 offset = fold_build2_loc (input_location, MINUS_EXPR,
5862 gfc_array_index_type, offset, tmp);
5863
5864 /* The size of this dimension, and the stride of the next. */
5865 if (n + 1 < sym->as->rank)
5866 {
5867 stride = GFC_TYPE_ARRAY_STRIDE (type, n + 1);
5868
5869 if (no_repack || partial != NULL_TREE)
5870 stmt_unpacked =
5871 gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[n+1]);
5872
5873 /* Figure out the stride if not a known constant. */
5874 if (!INTEGER_CST_P (stride))
5875 {
5876 if (no_repack)
5877 stmt_packed = NULL_TREE;
5878 else
5879 {
5880 /* Calculate stride = size * (ubound + 1 - lbound). */
5881 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5882 gfc_array_index_type,
5883 gfc_index_one_node, lbound);
5884 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5885 gfc_array_index_type, ubound, tmp);
5886 size = fold_build2_loc (input_location, MULT_EXPR,
5887 gfc_array_index_type, size, tmp);
5888 stmt_packed = size;
5889 }
5890
5891 /* Assign the stride. */
5892 if (stmt_packed != NULL_TREE && stmt_unpacked != NULL_TREE)
5893 tmp = fold_build3_loc (input_location, COND_EXPR,
5894 gfc_array_index_type, partial,
5895 stmt_unpacked, stmt_packed);
5896 else
5897 tmp = (stmt_packed != NULL_TREE) ? stmt_packed : stmt_unpacked;
5898 gfc_add_modify (&init, stride, tmp);
5899 }
5900 }
5901 else
5902 {
5903 stride = GFC_TYPE_ARRAY_SIZE (type);
5904
5905 if (stride && !INTEGER_CST_P (stride))
5906 {
5907 /* Calculate size = stride * (ubound + 1 - lbound). */
5908 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5909 gfc_array_index_type,
5910 gfc_index_one_node, lbound);
5911 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5912 gfc_array_index_type,
5913 ubound, tmp);
5914 tmp = fold_build2_loc (input_location, MULT_EXPR,
5915 gfc_array_index_type,
5916 GFC_TYPE_ARRAY_STRIDE (type, n), tmp);
5917 gfc_add_modify (&init, stride, tmp);
5918 }
5919 }
5920 }
5921
5922 gfc_trans_array_cobounds (type, &init, sym);
5923
5924 /* Set the offset. */
5925 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5926 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5927
5928 gfc_trans_vla_type_sizes (sym, &init);
5929
5930 stmtInit = gfc_finish_block (&init);
5931
5932 /* Only do the entry/initialization code if the arg is present. */
5933 dumdesc = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
5934 optional_arg = (sym->attr.optional
5935 || (sym->ns->proc_name->attr.entry_master
5936 && sym->attr.dummy));
5937 if (optional_arg)
5938 {
5939 tmp = gfc_conv_expr_present (sym);
5940 stmtInit = build3_v (COND_EXPR, tmp, stmtInit,
5941 build_empty_stmt (input_location));
5942 }
5943
5944 /* Cleanup code. */
5945 if (no_repack)
5946 stmtCleanup = NULL_TREE;
5947 else
5948 {
5949 stmtblock_t cleanup;
5950 gfc_start_block (&cleanup);
5951
5952 if (sym->attr.intent != INTENT_IN)
5953 {
5954 /* Copy the data back. */
5955 tmp = build_call_expr_loc (input_location,
5956 gfor_fndecl_in_unpack, 2, dumdesc, tmpdesc);
5957 gfc_add_expr_to_block (&cleanup, tmp);
5958 }
5959
5960 /* Free the temporary. */
5961 tmp = gfc_call_free (tmpdesc);
5962 gfc_add_expr_to_block (&cleanup, tmp);
5963
5964 stmtCleanup = gfc_finish_block (&cleanup);
5965
5966 /* Only do the cleanup if the array was repacked. */
5967 tmp = build_fold_indirect_ref_loc (input_location, dumdesc);
5968 tmp = gfc_conv_descriptor_data_get (tmp);
5969 tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
5970 tmp, tmpdesc);
5971 stmtCleanup = build3_v (COND_EXPR, tmp, stmtCleanup,
5972 build_empty_stmt (input_location));
5973
5974 if (optional_arg)
5975 {
5976 tmp = gfc_conv_expr_present (sym);
5977 stmtCleanup = build3_v (COND_EXPR, tmp, stmtCleanup,
5978 build_empty_stmt (input_location));
5979 }
5980 }
5981
5982 /* We don't need to free any memory allocated by internal_pack as it will
5983 be freed at the end of the function by pop_context. */
5984 gfc_add_init_cleanup (block, stmtInit, stmtCleanup);
5985
5986 gfc_restore_backend_locus (&loc);
5987 }
5988
5989
5990 /* Calculate the overall offset, including subreferences. */
5991 static void
5992 gfc_get_dataptr_offset (stmtblock_t *block, tree parm, tree desc, tree offset,
5993 bool subref, gfc_expr *expr)
5994 {
5995 tree tmp;
5996 tree field;
5997 tree stride;
5998 tree index;
5999 gfc_ref *ref;
6000 gfc_se start;
6001 int n;
6002
6003 /* If offset is NULL and this is not a subreferenced array, there is
6004 nothing to do. */
6005 if (offset == NULL_TREE)
6006 {
6007 if (subref)
6008 offset = gfc_index_zero_node;
6009 else
6010 return;
6011 }
6012
6013 tmp = gfc_conv_array_data (desc);
6014 tmp = build_fold_indirect_ref_loc (input_location,
6015 tmp);
6016 tmp = gfc_build_array_ref (tmp, offset, NULL);
6017
6018 /* Offset the data pointer for pointer assignments from arrays with
6019 subreferences; e.g. my_integer => my_type(:)%integer_component. */
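/* Only the data pointer is adjusted here; the bounds and strides taken from
   the parent array are left alone, so each scalarized element access lands
   on the selected component (or substring/element) within the corresponding
   element of the parent array.  */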
6020 if (subref)
6021 {
6022 /* Go past the array reference. */
6023 for (ref = expr->ref; ref; ref = ref->next)
6024 if (ref->type == REF_ARRAY &&
6025 ref->u.ar.type != AR_ELEMENT)
6026 {
6027 ref = ref->next;
6028 break;
6029 }
6030
6031 /* Calculate the offset for each subsequent subreference. */
6032 for (; ref; ref = ref->next)
6033 {
6034 switch (ref->type)
6035 {
6036 case REF_COMPONENT:
6037 field = ref->u.c.component->backend_decl;
6038 gcc_assert (field && TREE_CODE (field) == FIELD_DECL);
6039 tmp = fold_build3_loc (input_location, COMPONENT_REF,
6040 TREE_TYPE (field),
6041 tmp, field, NULL_TREE);
6042 break;
6043
6044 case REF_SUBSTRING:
6045 gcc_assert (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE);
6046 gfc_init_se (&start, NULL);
6047 gfc_conv_expr_type (&start, ref->u.ss.start, gfc_charlen_type_node);
6048 gfc_add_block_to_block (block, &start.pre);
6049 tmp = gfc_build_array_ref (tmp, start.expr, NULL);
6050 break;
6051
6052 case REF_ARRAY:
6053 gcc_assert (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE
6054 && ref->u.ar.type == AR_ELEMENT);
6055
6056 /* TODO - Add bounds checking. */
6057 stride = gfc_index_one_node;
6058 index = gfc_index_zero_node;
6059 for (n = 0; n < ref->u.ar.dimen; n++)
6060 {
6061 tree itmp;
6062 tree jtmp;
6063
6064 /* Update the index. */
6065 gfc_init_se (&start, NULL);
6066 gfc_conv_expr_type (&start, ref->u.ar.start[n], gfc_array_index_type);
6067 itmp = gfc_evaluate_now (start.expr, block);
6068 gfc_init_se (&start, NULL);
6069 gfc_conv_expr_type (&start, ref->u.ar.as->lower[n], gfc_array_index_type);
6070 jtmp = gfc_evaluate_now (start.expr, block);
6071 itmp = fold_build2_loc (input_location, MINUS_EXPR,
6072 gfc_array_index_type, itmp, jtmp);
6073 itmp = fold_build2_loc (input_location, MULT_EXPR,
6074 gfc_array_index_type, itmp, stride);
6075 index = fold_build2_loc (input_location, PLUS_EXPR,
6076 gfc_array_index_type, itmp, index);
6077 index = gfc_evaluate_now (index, block);
6078
6079 /* Update the stride. */
6080 gfc_init_se (&start, NULL);
6081 gfc_conv_expr_type (&start, ref->u.ar.as->upper[n], gfc_array_index_type);
6082 itmp = fold_build2_loc (input_location, MINUS_EXPR,
6083 gfc_array_index_type, start.expr,
6084 jtmp);
6085 itmp = fold_build2_loc (input_location, PLUS_EXPR,
6086 gfc_array_index_type,
6087 gfc_index_one_node, itmp);
6088 stride = fold_build2_loc (input_location, MULT_EXPR,
6089 gfc_array_index_type, stride, itmp);
6090 stride = gfc_evaluate_now (stride, block);
6091 }
6092
6093 /* Apply the index to obtain the array element. */
6094 tmp = gfc_build_array_ref (tmp, index, NULL);
6095 break;
6096
6097 default:
6098 gcc_unreachable ();
6099 break;
6100 }
6101 }
6102 }
6103
6104 /* Set the target data pointer. */
6105 offset = gfc_build_addr_expr (gfc_array_dataptr_type (desc), tmp);
6106 gfc_conv_descriptor_data_set (block, parm, offset);
6107 }
6108
6109
6110 /* gfc_conv_expr_descriptor needs the string length of an expression
6111 so that the size of the temporary can be obtained. This is done
6112 by adding up the string lengths of all the elements in the
6113 expression. Functions with non-constant expressions have their
6114 string lengths mapped onto the actual arguments using the
6115 interface mapping machinery in trans-expr.c. */
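/* For instance, the element length of a concatenation "a // b" is
   len(a) + len(b), while for a call to a function declared along the lines
   of "character(len=2*n) function f (n)" the length expression is evaluated
   with the dummy arguments mapped onto the actual arguments.  */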
6116 static void
6117 get_array_charlen (gfc_expr *expr, gfc_se *se)
6118 {
6119 gfc_interface_mapping mapping;
6120 gfc_formal_arglist *formal;
6121 gfc_actual_arglist *arg;
6122 gfc_se tse;
6123
6124 if (expr->ts.u.cl->length
6125 && gfc_is_constant_expr (expr->ts.u.cl->length))
6126 {
6127 if (!expr->ts.u.cl->backend_decl)
6128 gfc_conv_string_length (expr->ts.u.cl, expr, &se->pre);
6129 return;
6130 }
6131
6132 switch (expr->expr_type)
6133 {
6134 case EXPR_OP:
6135 get_array_charlen (expr->value.op.op1, se);
6136
6137 /* For parentheses the expression ts.u.cl is identical. */
6138 if (expr->value.op.op == INTRINSIC_PARENTHESES)
6139 return;
6140
6141 expr->ts.u.cl->backend_decl =
6142 gfc_create_var (gfc_charlen_type_node, "sln");
6143
6144 if (expr->value.op.op2)
6145 {
6146 get_array_charlen (expr->value.op.op2, se);
6147
6148 gcc_assert (expr->value.op.op == INTRINSIC_CONCAT);
6149
6150 /* Add the string lengths and assign them to the expression
6151 string length backend declaration. */
6152 gfc_add_modify (&se->pre, expr->ts.u.cl->backend_decl,
6153 fold_build2_loc (input_location, PLUS_EXPR,
6154 gfc_charlen_type_node,
6155 expr->value.op.op1->ts.u.cl->backend_decl,
6156 expr->value.op.op2->ts.u.cl->backend_decl));
6157 }
6158 else
6159 gfc_add_modify (&se->pre, expr->ts.u.cl->backend_decl,
6160 expr->value.op.op1->ts.u.cl->backend_decl);
6161 break;
6162
6163 case EXPR_FUNCTION:
6164 if (expr->value.function.esym == NULL
6165 || expr->ts.u.cl->length->expr_type == EXPR_CONSTANT)
6166 {
6167 gfc_conv_string_length (expr->ts.u.cl, expr, &se->pre);
6168 break;
6169 }
6170
6171 /* Map expressions involving the dummy arguments onto the actual
6172 argument expressions. */
6173 gfc_init_interface_mapping (&mapping);
6174 formal = expr->symtree->n.sym->formal;
6175 arg = expr->value.function.actual;
6176
6177 /* Set se = NULL in the calls to the interface mapping, to suppress any
6178 backend stuff. */
6179 for (; arg != NULL; arg = arg->next, formal = formal ? formal->next : NULL)
6180 {
6181 if (!arg->expr)
6182 continue;
6183 if (formal->sym)
6184 gfc_add_interface_mapping (&mapping, formal->sym, NULL, arg->expr);
6185 }
6186
6187 gfc_init_se (&tse, NULL);
6188
6189 /* Build the expression for the character length and convert it. */
6190 gfc_apply_interface_mapping (&mapping, &tse, expr->ts.u.cl->length);
6191
6192 gfc_add_block_to_block (&se->pre, &tse.pre);
6193 gfc_add_block_to_block (&se->post, &tse.post);
6194 tse.expr = fold_convert (gfc_charlen_type_node, tse.expr);
6195 tse.expr = fold_build2_loc (input_location, MAX_EXPR,
6196 gfc_charlen_type_node, tse.expr,
6197 build_int_cst (gfc_charlen_type_node, 0));
6198 expr->ts.u.cl->backend_decl = tse.expr;
6199 gfc_free_interface_mapping (&mapping);
6200 break;
6201
6202 default:
6203 gfc_conv_string_length (expr->ts.u.cl, expr, &se->pre);
6204 break;
6205 }
6206 }
6207
6208
6209 /* Helper function to check whether the scalarizer dimensions are in transposed order. */
6210 static bool
6211 transposed_dims (gfc_ss *ss)
6212 {
6213 int n;
6214
6215 for (n = 0; n < ss->dimen; n++)
6216 if (ss->dim[n] != n)
6217 return true;
6218 return false;
6219 }
6220
6221 /* Convert an array for passing as an actual argument. Expressions and
6222 vector subscripts are evaluated and stored in a temporary, which is then
6223 passed. For whole arrays the descriptor is passed. For array sections
6224 a modified copy of the descriptor is passed, but using the original data.
6225
6226 This function is also used for array pointer assignments, and there
6227 are three cases:
6228
6229 - se->want_pointer && !se->direct_byref
6230 EXPR is an actual argument. On exit, se->expr contains a
6231 pointer to the array descriptor.
6232
6233 - !se->want_pointer && !se->direct_byref
6234 EXPR is an actual argument to an intrinsic function or the
6235 left-hand side of a pointer assignment. On exit, se->expr
6236 contains the descriptor for EXPR.
6237
6238 - !se->want_pointer && se->direct_byref
6239 EXPR is the right-hand side of a pointer assignment and
6240 se->expr is the descriptor for the previously-evaluated
6241 left-hand side. The function creates an assignment from
6242 EXPR to se->expr.
6243
6244
6245 The se->force_tmp flag disables the non-copying descriptor optimization
6246 that is used for transpose. It may be used in cases where there is an
6247 alias between the transpose argument and another argument in the same
6248 function call. */
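/* Purely as an illustration: a pointer assignment

     p => a(2:10:2)

   takes the direct_byref path, so the section's bounds, strides and data
   pointer are written straight into the descriptor of "p"; passing
   "a(2:10:2)" as an actual argument takes the want_pointer path instead and
   yields the address of a freshly built section descriptor.  */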
6249
6250 void
6251 gfc_conv_expr_descriptor (gfc_se * se, gfc_expr * expr, gfc_ss * ss)
6252 {
6253 gfc_ss_type ss_type;
6254 gfc_ss_info *ss_info;
6255 gfc_loopinfo loop;
6256 gfc_array_info *info;
6257 int need_tmp;
6258 int n;
6259 tree tmp;
6260 tree desc;
6261 stmtblock_t block;
6262 tree start;
6263 tree offset;
6264 int full;
6265 bool subref_array_target = false;
6266 gfc_expr *arg, *ss_expr;
6267
6268 gcc_assert (ss != NULL);
6269 gcc_assert (ss != gfc_ss_terminator);
6270
6271 ss_info = ss->info;
6272 ss_type = ss_info->type;
6273 ss_expr = ss_info->expr;
6274
6275 /* Special case things we know we can pass easily. */
6276 switch (expr->expr_type)
6277 {
6278 case EXPR_VARIABLE:
6279 /* If we have a linear array section, we can pass it directly.
6280 Otherwise we need to copy it into a temporary. */
6281
6282 gcc_assert (ss_type == GFC_SS_SECTION);
6283 gcc_assert (ss_expr == expr);
6284 info = &ss_info->data.array;
6285
6286 /* Get the descriptor for the array. */
6287 gfc_conv_ss_descriptor (&se->pre, ss, 0);
6288 desc = info->descriptor;
6289
6290 subref_array_target = se->direct_byref && is_subref_array (expr);
6291 need_tmp = gfc_ref_needs_temporary_p (expr->ref)
6292 && !subref_array_target;
6293
6294 if (se->force_tmp)
6295 need_tmp = 1;
6296
6297 if (need_tmp)
6298 full = 0;
6299 else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6300 {
6301 /* Create a new descriptor if the array doesn't have one. */
6302 full = 0;
6303 }
6304 else if (info->ref->u.ar.type == AR_FULL)
6305 full = 1;
6306 else if (se->direct_byref)
6307 full = 0;
6308 else
6309 full = gfc_full_array_ref_p (info->ref, NULL);
6310
6311 if (full && !transposed_dims (ss))
6312 {
6313 if (se->direct_byref && !se->byref_noassign)
6314 {
6315 /* Copy the descriptor for pointer assignments. */
6316 gfc_add_modify (&se->pre, se->expr, desc);
6317
6318 /* Add any offsets from subreferences. */
6319 gfc_get_dataptr_offset (&se->pre, se->expr, desc, NULL_TREE,
6320 subref_array_target, expr);
6321 }
6322 else if (se->want_pointer)
6323 {
6324 /* We pass full arrays directly. This means that pointers and
6325 allocatable arrays should also work. */
6326 se->expr = gfc_build_addr_expr (NULL_TREE, desc);
6327 }
6328 else
6329 {
6330 se->expr = desc;
6331 }
6332
6333 if (expr->ts.type == BT_CHARACTER)
6334 se->string_length = gfc_get_expr_charlen (expr);
6335
6336 return;
6337 }
6338 break;
6339
6340 case EXPR_FUNCTION:
6341
6342 /* We don't need to copy data in some cases. */
6343 arg = gfc_get_noncopying_intrinsic_argument (expr);
6344 if (arg)
6345 {
6346 /* This is a call to transpose... */
6347 gcc_assert (expr->value.function.isym->id == GFC_ISYM_TRANSPOSE);
6348 /* ... which has already been handled by the scalarizer, so
6349 that we just need to get its argument's descriptor. */
6350 gfc_conv_expr_descriptor (se, expr->value.function.actual->expr, ss);
6351 return;
6352 }
6353
6354 /* A transformational function return value will be a temporary
6355 array descriptor. We still need to go through the scalarizer
6356 to create the descriptor. Elemental functions are handled as
6357 arbitrary expressions, i.e. copy to a temporary. */
6358
6359 if (se->direct_byref)
6360 {
6361 gcc_assert (ss_type == GFC_SS_FUNCTION && ss_expr == expr);
6362
6363 /* For pointer assignments pass the descriptor directly. */
6364 if (se->ss == NULL)
6365 se->ss = ss;
6366 else
6367 gcc_assert (se->ss == ss);
6368 se->expr = gfc_build_addr_expr (NULL_TREE, se->expr);
6369 gfc_conv_expr (se, expr);
6370 return;
6371 }
6372
6373 if (ss_expr != expr || ss_type != GFC_SS_FUNCTION)
6374 {
6375 if (ss_expr != expr)
6376 /* Elemental function. */
6377 gcc_assert ((expr->value.function.esym != NULL
6378 && expr->value.function.esym->attr.elemental)
6379 || (expr->value.function.isym != NULL
6380 && expr->value.function.isym->elemental)
6381 || gfc_inline_intrinsic_function_p (expr));
6382 else
6383 gcc_assert (ss_type == GFC_SS_INTRINSIC);
6384
6385 need_tmp = 1;
6386 if (expr->ts.type == BT_CHARACTER
6387 && expr->ts.u.cl->length->expr_type != EXPR_CONSTANT)
6388 get_array_charlen (expr, se);
6389
6390 info = NULL;
6391 }
6392 else
6393 {
6394 /* Transformational function. */
6395 info = &ss_info->data.array;
6396 need_tmp = 0;
6397 }
6398 break;
6399
6400 case EXPR_ARRAY:
6401 /* Constant array constructors don't need a temporary. */
6402 if (ss_type == GFC_SS_CONSTRUCTOR
6403 && expr->ts.type != BT_CHARACTER
6404 && gfc_constant_array_constructor_p (expr->value.constructor))
6405 {
6406 need_tmp = 0;
6407 info = &ss_info->data.array;
6408 }
6409 else
6410 {
6411 need_tmp = 1;
6412 info = NULL;
6413 }
6414 break;
6415
6416 default:
6417 /* Something complicated. Copy it into a temporary. */
6418 need_tmp = 1;
6419 info = NULL;
6420 break;
6421 }
6422
6423 /* If we are creating a temporary, we don't need to bother about aliases
6424 anymore. */
6425 if (need_tmp)
6426 se->force_tmp = 0;
6427
6428 gfc_init_loopinfo (&loop);
6429
6430 /* Associate the SS with the loop. */
6431 gfc_add_ss_to_loop (&loop, ss);
6432
6433 /* Tell the scalarizer not to bother creating loop variables, etc. */
6434 if (!need_tmp)
6435 loop.array_parameter = 1;
6436 else
6437 /* The right-hand side of a pointer assignment mustn't use a temporary. */
6438 gcc_assert (!se->direct_byref);
6439
6440 /* Setup the scalarizing loops and bounds. */
6441 gfc_conv_ss_startstride (&loop);
6442
6443 if (need_tmp)
6444 {
6445 if (expr->ts.type == BT_CHARACTER && !expr->ts.u.cl->backend_decl)
6446 get_array_charlen (expr, se);
6447
6448 /* Tell the scalarizer to make a temporary. */
6449 loop.temp_ss = gfc_get_temp_ss (gfc_typenode_for_spec (&expr->ts),
6450 ((expr->ts.type == BT_CHARACTER)
6451 ? expr->ts.u.cl->backend_decl
6452 : NULL),
6453 loop.dimen);
6454
6455 se->string_length = loop.temp_ss->info->string_length;
6456 gcc_assert (loop.temp_ss->dimen == loop.dimen);
6457 gfc_add_ss_to_loop (&loop, loop.temp_ss);
6458 }
6459
6460 gfc_conv_loop_setup (&loop, & expr->where);
6461
6462 if (need_tmp)
6463 {
6464 /* Copy into a temporary and pass that. We don't need to copy the data
6465 back because expressions and vector subscripts must be INTENT_IN. */
6466 /* TODO: Optimize passing function return values. */
6467 gfc_se lse;
6468 gfc_se rse;
6469
6470 /* Start the copying loops. */
6471 gfc_mark_ss_chain_used (loop.temp_ss, 1);
6472 gfc_mark_ss_chain_used (ss, 1);
6473 gfc_start_scalarized_body (&loop, &block);
6474
6475 /* Copy each data element. */
6476 gfc_init_se (&lse, NULL);
6477 gfc_copy_loopinfo_to_se (&lse, &loop);
6478 gfc_init_se (&rse, NULL);
6479 gfc_copy_loopinfo_to_se (&rse, &loop);
6480
6481 lse.ss = loop.temp_ss;
6482 rse.ss = ss;
6483
6484 gfc_conv_scalarized_array_ref (&lse, NULL);
6485 if (expr->ts.type == BT_CHARACTER)
6486 {
6487 gfc_conv_expr (&rse, expr);
6488 if (POINTER_TYPE_P (TREE_TYPE (rse.expr)))
6489 rse.expr = build_fold_indirect_ref_loc (input_location,
6490 rse.expr);
6491 }
6492 else
6493 gfc_conv_expr_val (&rse, expr);
6494
6495 gfc_add_block_to_block (&block, &rse.pre);
6496 gfc_add_block_to_block (&block, &lse.pre);
6497
6498 lse.string_length = rse.string_length;
6499 tmp = gfc_trans_scalar_assign (&lse, &rse, expr->ts, true,
6500 expr->expr_type == EXPR_VARIABLE
6501 || expr->expr_type == EXPR_ARRAY, true);
6502 gfc_add_expr_to_block (&block, tmp);
6503
6504 /* Finish the copying loops. */
6505 gfc_trans_scalarizing_loops (&loop, &block);
6506
6507 desc = loop.temp_ss->info->data.array.descriptor;
6508 }
6509 else if (expr->expr_type == EXPR_FUNCTION && !transposed_dims (ss))
6510 {
6511 desc = info->descriptor;
6512 se->string_length = ss_info->string_length;
6513 }
6514 else
6515 {
6516 /* We pass sections without copying to a temporary. Make a new
6517 descriptor and point it at the section we want. The loop variable
6518 limits will be the limits of the section.
6519 A function may decide to repack the array to speed up access, but
6520 we're not bothered about that here. */
6521 int dim, ndim, codim;
6522 tree parm;
6523 tree parmtype;
6524 tree stride;
6525 tree from;
6526 tree to;
6527 tree base;
6528
6529 ndim = info->ref ? info->ref->u.ar.dimen : ss->dimen;
6530
6531 if (se->want_coarray)
6532 {
6533 gfc_array_ref *ar = &info->ref->u.ar;
6534
6535 codim = gfc_get_corank (expr);
6536 for (n = 0; n < codim - 1; n++)
6537 {
6538 /* Make sure we are not lost somehow. */
6539 gcc_assert (ar->dimen_type[n + ndim] == DIMEN_THIS_IMAGE);
6540
6541 /* Make sure the call to gfc_conv_section_startstride won't
6542 generate unnecessary code to calculate stride. */
6543 gcc_assert (ar->stride[n + ndim] == NULL);
6544
6545 gfc_conv_section_startstride (&loop, ss, n + ndim);
6546 loop.from[n + loop.dimen] = info->start[n + ndim];
6547 loop.to[n + loop.dimen] = info->end[n + ndim];
6548 }
6549
6550 gcc_assert (n == codim - 1);
6551 evaluate_bound (&loop.pre, info->start, ar->start,
6552 info->descriptor, n + ndim, true);
6553 loop.from[n + loop.dimen] = info->start[n + ndim];
6554 }
6555 else
6556 codim = 0;
6557
6558 /* Set the string_length for a character array. */
6559 if (expr->ts.type == BT_CHARACTER)
6560 se->string_length = gfc_get_expr_charlen (expr);
6561
6562 desc = info->descriptor;
6563 if (se->direct_byref && !se->byref_noassign)
6564 {
6565 /* For pointer assignments we fill in the destination. */
6566 parm = se->expr;
6567 parmtype = TREE_TYPE (parm);
6568 }
6569 else
6570 {
6571 /* Otherwise make a new one. */
6572 parmtype = gfc_get_element_type (TREE_TYPE (desc));
6573 parmtype = gfc_get_array_type_bounds (parmtype, loop.dimen, codim,
6574 loop.from, loop.to, 0,
6575 GFC_ARRAY_UNKNOWN, false);
6576 parm = gfc_create_var (parmtype, "parm");
6577 }
6578
6579 offset = gfc_index_zero_node;
6580
6581 /* The following can be somewhat confusing. We have two
6582 descriptors, a new one and the original array.
6583 {parm, parmtype, dim} refer to the new one.
6584 {desc, type, n, loop} refer to the original, which may be
6585 a descriptorless array.
6586 The bounds of the scalarization are the bounds of the section.
6587 We don't have to worry about numeric overflows when calculating
6588 the offsets because all elements are within the array data. */
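/* As a sketch only: for a section such as a(2:10:2) of a unit-stride array,
   the new descriptor ends up with lbound 1, ubound 5 and stride 2, and its
   data pointer is made to point at the first element of the section; the
   exact values depend on the reference being translated.  */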
6589
6590 /* Set the dtype. */
6591 tmp = gfc_conv_descriptor_dtype (parm);
6592 gfc_add_modify (&loop.pre, tmp, gfc_get_dtype (parmtype));
6593
6594 /* For pointer assignments, only set the offset to zero if this is not
6595 the full array. */
6596 if (se->direct_byref
6597 && info->ref && info->ref->u.ar.type != AR_FULL)
6598 base = gfc_index_zero_node;
6599 else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6600 base = gfc_evaluate_now (gfc_conv_array_offset (desc), &loop.pre);
6601 else
6602 base = NULL_TREE;
6603
6604 for (n = 0; n < ndim; n++)
6605 {
6606 stride = gfc_conv_array_stride (desc, n);
6607
6608 /* Work out the offset. */
6609 if (info->ref
6610 && info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
6611 {
6612 gcc_assert (info->subscript[n]
6613 && info->subscript[n]->info->type == GFC_SS_SCALAR);
6614 start = info->subscript[n]->info->data.scalar.value;
6615 }
6616 else
6617 {
6618 /* Evaluate and remember the start of the section. */
6619 start = info->start[n];
6620 stride = gfc_evaluate_now (stride, &loop.pre);
6621 }
6622
6623 tmp = gfc_conv_array_lbound (desc, n);
6624 tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (tmp),
6625 start, tmp);
6626 tmp = fold_build2_loc (input_location, MULT_EXPR, TREE_TYPE (tmp),
6627 tmp, stride);
6628 offset = fold_build2_loc (input_location, PLUS_EXPR, TREE_TYPE (tmp),
6629 offset, tmp);
6630
6631 if (info->ref
6632 && info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
6633 {
6634 /* For elemental dimensions, we only need the offset. */
6635 continue;
6636 }
6637
6638 /* Vector subscripts need copying and are handled elsewhere. */
6639 if (info->ref)
6640 gcc_assert (info->ref->u.ar.dimen_type[n] == DIMEN_RANGE);
6641
6642 /* Look for the corresponding scalarizer dimension: dim. */
6643 for (dim = 0; dim < ndim; dim++)
6644 if (ss->dim[dim] == n)
6645 break;
6646
6647 /* Loop exited early: the DIM being looked for has been found. */
6648 gcc_assert (dim < ndim);
6649
6650 /* Set the new lower bound. */
6651 from = loop.from[dim];
6652 to = loop.to[dim];
6653
6654 /* If we have an array section or are assigning, make sure that
6655 the lower bound is 1. References to the full
6656 array should otherwise keep the original bounds. */
6657 if ((!info->ref
6658 || info->ref->u.ar.type != AR_FULL)
6659 && !integer_onep (from))
6660 {
6661 tmp = fold_build2_loc (input_location, MINUS_EXPR,
6662 gfc_array_index_type, gfc_index_one_node,
6663 from);
6664 to = fold_build2_loc (input_location, PLUS_EXPR,
6665 gfc_array_index_type, to, tmp);
6666 from = gfc_index_one_node;
6667 }
6668 gfc_conv_descriptor_lbound_set (&loop.pre, parm,
6669 gfc_rank_cst[dim], from);
6670
6671 /* Set the new upper bound. */
6672 gfc_conv_descriptor_ubound_set (&loop.pre, parm,
6673 gfc_rank_cst[dim], to);
6674
6675 /* Multiply the stride by the section stride to get the
6676 total stride. */
6677 stride = fold_build2_loc (input_location, MULT_EXPR,
6678 gfc_array_index_type,
6679 stride, info->stride[n]);
6680
6681 if (se->direct_byref
6682 && info->ref
6683 && info->ref->u.ar.type != AR_FULL)
6684 {
6685 base = fold_build2_loc (input_location, MINUS_EXPR,
6686 TREE_TYPE (base), base, stride);
6687 }
6688 else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6689 {
6690 tmp = gfc_conv_array_lbound (desc, n);
6691 tmp = fold_build2_loc (input_location, MINUS_EXPR,
6692 TREE_TYPE (base), tmp, loop.from[dim]);
6693 tmp = fold_build2_loc (input_location, MULT_EXPR,
6694 TREE_TYPE (base), tmp,
6695 gfc_conv_array_stride (desc, n));
6696 base = fold_build2_loc (input_location, PLUS_EXPR,
6697 TREE_TYPE (base), tmp, base);
6698 }
6699
6700 /* Store the new stride. */
6701 gfc_conv_descriptor_stride_set (&loop.pre, parm,
6702 gfc_rank_cst[dim], stride);
6703 }
6704
6705 for (n = loop.dimen; n < loop.dimen + codim; n++)
6706 {
6707 from = loop.from[n];
6708 to = loop.to[n];
6709 gfc_conv_descriptor_lbound_set (&loop.pre, parm,
6710 gfc_rank_cst[n], from);
6711 if (n < loop.dimen + codim - 1)
6712 gfc_conv_descriptor_ubound_set (&loop.pre, parm,
6713 gfc_rank_cst[n], to);
6714 }
6715
6716 if (se->data_not_needed)
6717 gfc_conv_descriptor_data_set (&loop.pre, parm,
6718 gfc_index_zero_node);
6719 else
6720 /* Point the data pointer at the 1st element in the section. */
6721 gfc_get_dataptr_offset (&loop.pre, parm, desc, offset,
6722 subref_array_target, expr);
6723
6724 if ((se->direct_byref || GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6725 && !se->data_not_needed)
6726 {
6727 /* Set the offset. */
6728 gfc_conv_descriptor_offset_set (&loop.pre, parm, base);
6729 }
6730 else
6731 {
6732 /* Only the callee knows what the correct offset is, so just set
6733 it to zero here. */
6734 gfc_conv_descriptor_offset_set (&loop.pre, parm, gfc_index_zero_node);
6735 }
6736 desc = parm;
6737 }
6738
6739 if (!se->direct_byref || se->byref_noassign)
6740 {
6741 /* Get a pointer to the new descriptor. */
6742 if (se->want_pointer)
6743 se->expr = gfc_build_addr_expr (NULL_TREE, desc);
6744 else
6745 se->expr = desc;
6746 }
6747
6748 gfc_add_block_to_block (&se->pre, &loop.pre);
6749 gfc_add_block_to_block (&se->post, &loop.post);
6750
6751 /* Cleanup the scalarizer. */
6752 gfc_cleanup_loop (&loop);
6753 }
6754
6755 /* Helper function for gfc_conv_array_parameter if array size needs to be
6756 computed. */
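/* For a rank-1 descriptor the computation below amounts to

     size = MAX (ubound - lbound + 1, 0) * sizeof (element)

   in bytes; higher-rank descriptors defer to the library size0 routine, and
   descriptorless arrays use the element count recorded in the type, again
   multiplied by the element size.  */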
6757
6758 static void
6759 array_parameter_size (tree desc, gfc_expr *expr, tree *size)
6760 {
6761 tree elem;
6762 if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6763 *size = GFC_TYPE_ARRAY_SIZE (TREE_TYPE (desc));
6764 else if (expr->rank > 1)
6765 *size = build_call_expr_loc (input_location,
6766 gfor_fndecl_size0, 1,
6767 gfc_build_addr_expr (NULL, desc));
6768 else
6769 {
6770 tree ubound = gfc_conv_descriptor_ubound_get (desc, gfc_index_zero_node);
6771 tree lbound = gfc_conv_descriptor_lbound_get (desc, gfc_index_zero_node);
6772
6773 *size = fold_build2_loc (input_location, MINUS_EXPR,
6774 gfc_array_index_type, ubound, lbound);
6775 *size = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
6776 *size, gfc_index_one_node);
6777 *size = fold_build2_loc (input_location, MAX_EXPR, gfc_array_index_type,
6778 *size, gfc_index_zero_node);
6779 }
6780 elem = TYPE_SIZE_UNIT (gfc_get_element_type (TREE_TYPE (desc)));
6781 *size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
6782 *size, fold_convert (gfc_array_index_type, elem));
6783 }
6784
6785 /* Convert an array for passing as an actual parameter. */
6786 /* TODO: Optimize passing g77 arrays. */
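/* Very roughly, for a call such as

     call sub (b(1:10:2))

   where "sub" expects a g77-style (explicit-shape or assumed-size) dummy,
   only a data pointer is passed; if the actual argument is not simply
   contiguous it is packed into a temporary first and, unless the dummy is
   INTENT(IN), unpacked again after the call.  */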
6787
6788 void
6789 gfc_conv_array_parameter (gfc_se * se, gfc_expr * expr, gfc_ss * ss, bool g77,
6790 const gfc_symbol *fsym, const char *proc_name,
6791 tree *size)
6792 {
6793 tree ptr;
6794 tree desc;
6795 tree tmp = NULL_TREE;
6796 tree stmt;
6797 tree parent = DECL_CONTEXT (current_function_decl);
6798 bool full_array_var;
6799 bool this_array_result;
6800 bool contiguous;
6801 bool no_pack;
6802 bool array_constructor;
6803 bool good_allocatable;
6804 bool ultimate_ptr_comp;
6805 bool ultimate_alloc_comp;
6806 gfc_symbol *sym;
6807 stmtblock_t block;
6808 gfc_ref *ref;
6809
6810 ultimate_ptr_comp = false;
6811 ultimate_alloc_comp = false;
6812
6813 for (ref = expr->ref; ref; ref = ref->next)
6814 {
6815 if (ref->next == NULL)
6816 break;
6817
6818 if (ref->type == REF_COMPONENT)
6819 {
6820 ultimate_ptr_comp = ref->u.c.component->attr.pointer;
6821 ultimate_alloc_comp = ref->u.c.component->attr.allocatable;
6822 }
6823 }
6824
6825 full_array_var = false;
6826 contiguous = false;
6827
6828 if (expr->expr_type == EXPR_VARIABLE && ref && !ultimate_ptr_comp)
6829 full_array_var = gfc_full_array_ref_p (ref, &contiguous);
6830
6831 sym = full_array_var ? expr->symtree->n.sym : NULL;
6832
6833 /* The symbol should have an array specification. */
6834 gcc_assert (!sym || sym->as || ref->u.ar.as);
6835
6836 if (expr->expr_type == EXPR_ARRAY && expr->ts.type == BT_CHARACTER)
6837 {
6838 get_array_ctor_strlen (&se->pre, expr->value.constructor, &tmp);
6839 expr->ts.u.cl->backend_decl = tmp;
6840 se->string_length = tmp;
6841 }
6842
6843 /* Is this the result of the enclosing procedure? */
6844 this_array_result = (full_array_var && sym->attr.flavor == FL_PROCEDURE);
6845 if (this_array_result
6846 && (sym->backend_decl != current_function_decl)
6847 && (sym->backend_decl != parent))
6848 this_array_result = false;
6849
6850 /* Pass the address of the array if it is not a pointer or assumed-shape array. */
6851 if (full_array_var && g77 && !this_array_result)
6852 {
6853 tmp = gfc_get_symbol_decl (sym);
6854
6855 if (sym->ts.type == BT_CHARACTER)
6856 se->string_length = sym->ts.u.cl->backend_decl;
6857
6858 if (sym->ts.type == BT_DERIVED || sym->ts.type == BT_CLASS)
6859 {
6860 gfc_conv_expr_descriptor (se, expr, ss);
6861 se->expr = gfc_conv_array_data (se->expr);
6862 return;
6863 }
6864
6865 if (!sym->attr.pointer
6866 && sym->as
6867 && sym->as->type != AS_ASSUMED_SHAPE
6868 && !sym->attr.allocatable)
6869 {
6870 /* Some variables are declared directly, others are declared as
6871 pointers and allocated on the heap. */
6872 if (sym->attr.dummy || POINTER_TYPE_P (TREE_TYPE (tmp)))
6873 se->expr = tmp;
6874 else
6875 se->expr = gfc_build_addr_expr (NULL_TREE, tmp);
6876 if (size)
6877 array_parameter_size (tmp, expr, size);
6878 return;
6879 }
6880
6881 if (sym->attr.allocatable)
6882 {
6883 if (sym->attr.dummy || sym->attr.result)
6884 {
6885 gfc_conv_expr_descriptor (se, expr, ss);
6886 tmp = se->expr;
6887 }
6888 if (size)
6889 array_parameter_size (tmp, expr, size);
6890 se->expr = gfc_conv_array_data (tmp);
6891 return;
6892 }
6893 }
6894
6895 /* A convenient reduction in scope. */
6896 contiguous = g77 && !this_array_result && contiguous;
6897
6898 /* There is no need to pack and unpack the array if it is contiguous
6899 and not a deferred- or assumed-shape array, or if it is simply
6900 contiguous. */
6901 no_pack = ((sym && sym->as
6902 && !sym->attr.pointer
6903 && sym->as->type != AS_DEFERRED
6904 && sym->as->type != AS_ASSUMED_SHAPE)
6905 ||
6906 (ref && ref->u.ar.as
6907 && ref->u.ar.as->type != AS_DEFERRED
6908 && ref->u.ar.as->type != AS_ASSUMED_SHAPE)
6909 ||
6910 gfc_is_simply_contiguous (expr, false));
6911
6912 no_pack = contiguous && no_pack;
6913
6914 /* Array constructors are always contiguous and do not need packing. */
6915 array_constructor = g77 && !this_array_result && expr->expr_type == EXPR_ARRAY;
6916
6917 /* Same is true of contiguous sections from allocatable variables. */
6918 good_allocatable = contiguous
6919 && expr->symtree
6920 && expr->symtree->n.sym->attr.allocatable;
6921
6922 /* Or ultimate allocatable components. */
6923 ultimate_alloc_comp = contiguous && ultimate_alloc_comp;
6924
6925 if (no_pack || array_constructor || good_allocatable || ultimate_alloc_comp)
6926 {
6927 gfc_conv_expr_descriptor (se, expr, ss);
6928 if (expr->ts.type == BT_CHARACTER)
6929 se->string_length = expr->ts.u.cl->backend_decl;
6930 if (size)
6931 array_parameter_size (se->expr, expr, size);
6932 se->expr = gfc_conv_array_data (se->expr);
6933 return;
6934 }
6935
6936 if (this_array_result)
6937 {
6938 /* Result of the enclosing function. */
6939 gfc_conv_expr_descriptor (se, expr, ss);
6940 if (size)
6941 array_parameter_size (se->expr, expr, size);
6942 se->expr = gfc_build_addr_expr (NULL_TREE, se->expr);
6943
6944 if (g77 && TREE_TYPE (TREE_TYPE (se->expr)) != NULL_TREE
6945 && GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (TREE_TYPE (se->expr))))
6946 se->expr = gfc_conv_array_data (build_fold_indirect_ref_loc (input_location,
6947 se->expr));
6948
6949 return;
6950 }
6951 else
6952 {
6953 /* Every other type of array. */
6954 se->want_pointer = 1;
6955 gfc_conv_expr_descriptor (se, expr, ss);
6956 if (size)
6957 array_parameter_size (build_fold_indirect_ref_loc (input_location,
6958 se->expr),
6959 expr, size);
6960 }
6961
6962 /* Deallocate the allocatable components of structures that are
6963 not variables. */
6964 if ((expr->ts.type == BT_DERIVED || expr->ts.type == BT_CLASS)
6965 && expr->ts.u.derived->attr.alloc_comp
6966 && expr->expr_type != EXPR_VARIABLE)
6967 {
6968 tmp = build_fold_indirect_ref_loc (input_location, se->expr);
6969 tmp = gfc_deallocate_alloc_comp (expr->ts.u.derived, tmp, expr->rank);
6970
6971 /* The components shall be deallocated before their containing entity. */
6972 gfc_prepend_expr_to_block (&se->post, tmp);
6973 }
6974
6975 if (g77 || (fsym && fsym->attr.contiguous
6976 && !gfc_is_simply_contiguous (expr, false)))
6977 {
6978 tree origptr = NULL_TREE;
6979
6980 desc = se->expr;
6981
6982 /* For contiguous arrays, save the original value of the descriptor. */
6983 if (!g77)
6984 {
6985 origptr = gfc_create_var (pvoid_type_node, "origptr");
6986 tmp = build_fold_indirect_ref_loc (input_location, desc);
6987 tmp = gfc_conv_array_data (tmp);
6988 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
6989 TREE_TYPE (origptr), origptr,
6990 fold_convert (TREE_TYPE (origptr), tmp));
6991 gfc_add_expr_to_block (&se->pre, tmp);
6992 }
6993
6994 /* Repack the array. */
6995 if (gfc_option.warn_array_temp)
6996 {
6997 if (fsym)
6998 gfc_warning ("Creating array temporary at %L for argument '%s'",
6999 &expr->where, fsym->name);
7000 else
7001 gfc_warning ("Creating array temporary at %L", &expr->where);
7002 }
7003
7004 ptr = build_call_expr_loc (input_location,
7005 gfor_fndecl_in_pack, 1, desc);
7006
7007 if (fsym && fsym->attr.optional && sym && sym->attr.optional)
7008 {
7009 tmp = gfc_conv_expr_present (sym);
7010 ptr = build3_loc (input_location, COND_EXPR, TREE_TYPE (se->expr),
7011 tmp, fold_convert (TREE_TYPE (se->expr), ptr),
7012 fold_convert (TREE_TYPE (se->expr), null_pointer_node));
7013 }
7014
7015 ptr = gfc_evaluate_now (ptr, &se->pre);
7016
7017 /* Use the packed data for the actual argument, except for contiguous arrays,
7018 where the descriptor's data component is set. */
7019 if (g77)
7020 se->expr = ptr;
7021 else
7022 {
7023 tmp = build_fold_indirect_ref_loc (input_location, desc);
7024 gfc_conv_descriptor_data_set (&se->pre, tmp, ptr);
7025 }
7026
7027 if (gfc_option.rtcheck & GFC_RTCHECK_ARRAY_TEMPS)
7028 {
7029 char * msg;
7030
7031 if (fsym && proc_name)
7032 asprintf (&msg, "An array temporary was created for argument "
7033 "'%s' of procedure '%s'", fsym->name, proc_name);
7034 else
7035 asprintf (&msg, "An array temporary was created");
7036
7037 tmp = build_fold_indirect_ref_loc (input_location,
7038 desc);
7039 tmp = gfc_conv_array_data (tmp);
7040 tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
7041 fold_convert (TREE_TYPE (tmp), ptr), tmp);
7042
7043 if (fsym && fsym->attr.optional && sym && sym->attr.optional)
7044 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
7045 boolean_type_node,
7046 gfc_conv_expr_present (sym), tmp);
7047
7048 gfc_trans_runtime_check (false, true, tmp, &se->pre,
7049 &expr->where, msg);
7050 free (msg);
7051 }
7052
7053 gfc_start_block (&block);
7054
7055 /* Copy the data back. */
7056 if (fsym == NULL || fsym->attr.intent != INTENT_IN)
7057 {
7058 tmp = build_call_expr_loc (input_location,
7059 gfor_fndecl_in_unpack, 2, desc, ptr);
7060 gfc_add_expr_to_block (&block, tmp);
7061 }
7062
7063 /* Free the temporary. */
7064 tmp = gfc_call_free (convert (pvoid_type_node, ptr));
7065 gfc_add_expr_to_block (&block, tmp);
7066
7067 stmt = gfc_finish_block (&block);
7068
7069 gfc_init_block (&block);
7070 /* Only if it was repacked. This code needs to be executed before the
7071 loop cleanup code. */
7072 tmp = build_fold_indirect_ref_loc (input_location,
7073 desc);
7074 tmp = gfc_conv_array_data (tmp);
7075 tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
7076 fold_convert (TREE_TYPE (tmp), ptr), tmp);
7077
7078 if (fsym && fsym->attr.optional && sym && sym->attr.optional)
7079 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
7080 boolean_type_node,
7081 gfc_conv_expr_present (sym), tmp);
7082
7083 tmp = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt (input_location));
7084
7085 gfc_add_expr_to_block (&block, tmp);
7086 gfc_add_block_to_block (&block, &se->post);
7087
7088 gfc_init_block (&se->post);
7089
7090 /* Reset the descriptor pointer. */
7091 if (!g77)
7092 {
7093 tmp = build_fold_indirect_ref_loc (input_location, desc);
7094 gfc_conv_descriptor_data_set (&se->post, tmp, origptr);
7095 }
7096
7097 gfc_add_block_to_block (&se->post, &block);
7098 }
7099 }
7100
7101
7102 /* Generate code to deallocate an array, if it is allocated. */
7103
7104 tree
7105 gfc_trans_dealloc_allocated (tree descriptor, bool coarray)
7106 {
7107 tree tmp;
7108 tree var;
7109 stmtblock_t block;
7110
7111 gfc_start_block (&block);
7112
7113 var = gfc_conv_descriptor_data_get (descriptor);
7114 STRIP_NOPS (var);
7115
7116 /* Call array_deallocate with an int * present in the second argument.
7117 Although it is ignored here, its presence ensures that arrays that
7118 are already deallocated are ignored. */
7119 tmp = gfc_deallocate_with_status (coarray ? descriptor : var, NULL_TREE,
7120 NULL_TREE, NULL_TREE, NULL_TREE, true,
7121 NULL, coarray);
7122 gfc_add_expr_to_block (&block, tmp);
7123
7124 /* Zero the data pointer. */
7125 tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
7126 var, build_int_cst (TREE_TYPE (var), 0));
7127 gfc_add_expr_to_block (&block, tmp);
7128
7129 return gfc_finish_block (&block);
7130 }
7131
7132
7133 /* This helper function calculates the number of elements in a full array, i.e. the extent of the last dimension multiplied by its stride. */
7134
7135 static tree
7136 get_full_array_size (stmtblock_t *block, tree decl, int rank)
7137 {
7138 tree idx;
7139 tree nelems;
7140 tree tmp;
7141 idx = gfc_rank_cst[rank - 1];
7142 nelems = gfc_conv_descriptor_ubound_get (decl, idx);
7143 tmp = gfc_conv_descriptor_lbound_get (decl, idx);
7144 tmp = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
7145 nelems, tmp);
7146 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
7147 tmp, gfc_index_one_node);
7148 tmp = gfc_evaluate_now (tmp, block);
7149
7150 nelems = gfc_conv_descriptor_stride_get (decl, idx);
7151 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
7152 nelems, tmp);
7153 return gfc_evaluate_now (tmp, block);
7154 }
7155
7156
7157 /* Allocate dest to the same size as src, and copy src -> dest.
7158 If no_malloc is set, only the copy is done. */
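/* A loose sketch, in C terms, of the code generated here for a rank > 0
   allocatable:

     if (src.data == NULL)
       dest.data = NULL;
     else
       {
         dest.data = malloc (nelems * elem_size);   /+ skipped if no_malloc +/
         memcpy (dest.data, src.data, nelems * elem_size);
       }
 */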
7159
7160 static tree
7161 duplicate_allocatable (tree dest, tree src, tree type, int rank,
7162 bool no_malloc)
7163 {
7164 tree tmp;
7165 tree size;
7166 tree nelems;
7167 tree null_cond;
7168 tree null_data;
7169 stmtblock_t block;
7170
7171 /* If the source is null, set the destination to null; otherwise,
7172 allocate memory for the destination and copy the data. */
7173 gfc_init_block (&block);
7174
7175 if (rank == 0)
7176 {
7177 tmp = null_pointer_node;
7178 tmp = fold_build2_loc (input_location, MODIFY_EXPR, type, dest, tmp);
7179 gfc_add_expr_to_block (&block, tmp);
7180 null_data = gfc_finish_block (&block);
7181
7182 gfc_init_block (&block);
7183 size = TYPE_SIZE_UNIT (TREE_TYPE (type));
7184 if (!no_malloc)
7185 {
7186 tmp = gfc_call_malloc (&block, type, size);
7187 tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
7188 dest, fold_convert (type, tmp));
7189 gfc_add_expr_to_block (&block, tmp);
7190 }
7191
7192 tmp = builtin_decl_explicit (BUILT_IN_MEMCPY);
7193 tmp = build_call_expr_loc (input_location, tmp, 3,
7194 dest, src, size);
7195 }
7196 else
7197 {
7198 gfc_conv_descriptor_data_set (&block, dest, null_pointer_node);
7199 null_data = gfc_finish_block (&block);
7200
7201 gfc_init_block (&block);
7202 nelems = get_full_array_size (&block, src, rank);
7203 tmp = fold_convert (gfc_array_index_type,
7204 TYPE_SIZE_UNIT (gfc_get_element_type (type)));
7205 size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
7206 nelems, tmp);
7207 if (!no_malloc)
7208 {
7209 tmp = TREE_TYPE (gfc_conv_descriptor_data_get (src));
7210 tmp = gfc_call_malloc (&block, tmp, size);
7211 gfc_conv_descriptor_data_set (&block, dest, tmp);
7212 }
7213
7214 /* We know the temporary and the value will be the same length,
7215 so we can use memcpy. */
7216 tmp = builtin_decl_explicit (BUILT_IN_MEMCPY);
7217 tmp = build_call_expr_loc (input_location,
7218 tmp, 3, gfc_conv_descriptor_data_get (dest),
7219 gfc_conv_descriptor_data_get (src), size);
7220 }
7221
7222 gfc_add_expr_to_block (&block, tmp);
7223 tmp = gfc_finish_block (&block);
7224
7225 /* Null the destination if the source is null; otherwise do
7226 the allocate and copy. */
7227 if (rank == 0)
7228 null_cond = src;
7229 else
7230 null_cond = gfc_conv_descriptor_data_get (src);
7231
7232 null_cond = convert (pvoid_type_node, null_cond);
7233 null_cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
7234 null_cond, null_pointer_node);
7235 return build3_v (COND_EXPR, null_cond, tmp, null_data);
7236 }
7237
7238
7239 /* Allocate dest to the same size as src, and copy data src -> dest. */
7240
7241 tree
7242 gfc_duplicate_allocatable (tree dest, tree src, tree type, int rank)
7243 {
7244 return duplicate_allocatable (dest, src, type, rank, false);
7245 }
7246
7247
7248 /* Copy data src -> dest. */
7249
7250 tree
7251 gfc_copy_allocatable_data (tree dest, tree src, tree type, int rank)
7252 {
7253 return duplicate_allocatable (dest, src, type, rank, true);
7254 }
7255
7256
7257 /* Recursively traverse an object of derived type, generating code to
7258 deallocate, nullify or copy allocatable components. This is the workhorse
7259 function for the functions named in this enum. */
7260
7261 enum {DEALLOCATE_ALLOC_COMP = 1, NULLIFY_ALLOC_COMP, COPY_ALLOC_COMP,
7262 COPY_ONLY_ALLOC_COMP};
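/* As an illustration only: given

     type :: t
       real, allocatable :: x(:)
     end type
     type(t) :: v(2)

   DEALLOCATE_ALLOC_COMP frees v(i)%x for each i, NULLIFY_ALLOC_COMP marks
   each v(i)%x as unallocated, and the COPY variants duplicate the
   allocatable components when a value of type t is copied.  */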
7263
7264 static tree
7265 structure_alloc_comps (gfc_symbol * der_type, tree decl,
7266 tree dest, int rank, int purpose)
7267 {
7268 gfc_component *c;
7269 gfc_loopinfo loop;
7270 stmtblock_t fnblock;
7271 stmtblock_t loopbody;
7272 stmtblock_t tmpblock;
7273 tree decl_type;
7274 tree tmp;
7275 tree comp;
7276 tree dcmp;
7277 tree nelems;
7278 tree index;
7279 tree var;
7280 tree cdecl;
7281 tree ctype;
7282 tree vref, dref;
7283 tree null_cond = NULL_TREE;
7284 bool called_dealloc_with_status;
7285
7286 gfc_init_block (&fnblock);
7287
7288 decl_type = TREE_TYPE (decl);
7289
7290 if ((POINTER_TYPE_P (decl_type) && rank != 0)
7291 || (TREE_CODE (decl_type) == REFERENCE_TYPE && rank == 0))
7292
7293 decl = build_fold_indirect_ref_loc (input_location,
7294 decl);
7295
7296 /* Just in case it gets dereferenced. */
7297 decl_type = TREE_TYPE (decl);
7298
7299 /* If this is an array of derived types with allocatable components,
7300 build a loop and recursively call this function. */
7301 if (TREE_CODE (decl_type) == ARRAY_TYPE
7302 || GFC_DESCRIPTOR_TYPE_P (decl_type))
7303 {
7304 tmp = gfc_conv_array_data (decl);
7305 var = build_fold_indirect_ref_loc (input_location,
7306 tmp);
7307
7308 /* Get the number of elements - 1 and set the counter. */
7309 if (GFC_DESCRIPTOR_TYPE_P (decl_type))
7310 {
7311 /* Use the descriptor for an allocatable array. Since this
7312 is a full array reference, we only need the descriptor
7313 information from dimension = rank. */
7314 tmp = get_full_array_size (&fnblock, decl, rank);
7315 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7316 gfc_array_index_type, tmp,
7317 gfc_index_one_node);
7318
7319 null_cond = gfc_conv_descriptor_data_get (decl);
7320 null_cond = fold_build2_loc (input_location, NE_EXPR,
7321 boolean_type_node, null_cond,
7322 build_int_cst (TREE_TYPE (null_cond), 0));
7323 }
7324 else
7325 {
7326 /* Otherwise use the TYPE_DOMAIN information. */
7327 tmp = array_type_nelts (decl_type);
7328 tmp = fold_convert (gfc_array_index_type, tmp);
7329 }
7330
7331 /* Remember that this is, in fact, the no. of elements - 1. */
7332 nelems = gfc_evaluate_now (tmp, &fnblock);
7333 index = gfc_create_var (gfc_array_index_type, "S");
7334
7335 /* Build the body of the loop. */
7336 gfc_init_block (&loopbody);
7337
7338 vref = gfc_build_array_ref (var, index, NULL);
7339
7340 if (purpose == COPY_ALLOC_COMP)
7341 {
7342 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (dest)))
7343 {
7344 tmp = gfc_duplicate_allocatable (dest, decl, decl_type, rank);
7345 gfc_add_expr_to_block (&fnblock, tmp);
7346 }
7347 tmp = build_fold_indirect_ref_loc (input_location,
7348 gfc_conv_array_data (dest));
7349 dref = gfc_build_array_ref (tmp, index, NULL);
7350 tmp = structure_alloc_comps (der_type, vref, dref, rank, purpose);
7351 }
7352 else if (purpose == COPY_ONLY_ALLOC_COMP)
7353 {
7354 tmp = build_fold_indirect_ref_loc (input_location,
7355 gfc_conv_array_data (dest));
7356 dref = gfc_build_array_ref (tmp, index, NULL);
7357 tmp = structure_alloc_comps (der_type, vref, dref, rank,
7358 COPY_ALLOC_COMP);
7359 }
7360 else
7361 tmp = structure_alloc_comps (der_type, vref, NULL_TREE, rank, purpose);
7362
7363 gfc_add_expr_to_block (&loopbody, tmp);
7364
7365 /* Build the loop and return. */
7366 gfc_init_loopinfo (&loop);
7367 loop.dimen = 1;
7368 loop.from[0] = gfc_index_zero_node;
7369 loop.loopvar[0] = index;
7370 loop.to[0] = nelems;
7371 gfc_trans_scalarizing_loops (&loop, &loopbody);
7372 gfc_add_block_to_block (&fnblock, &loop.pre);
7373
7374 tmp = gfc_finish_block (&fnblock);
7375 if (null_cond != NULL_TREE)
7376 tmp = build3_v (COND_EXPR, null_cond, tmp,
7377 build_empty_stmt (input_location));
7378
7379 return tmp;
7380 }
7381
7382 /* Otherwise, act on the components or recursively call self to
7383 act on a chain of components. */
7384 for (c = der_type->components; c; c = c->next)
7385 {
7386 bool cmp_has_alloc_comps = (c->ts.type == BT_DERIVED
7387 || c->ts.type == BT_CLASS)
7388 && c->ts.u.derived->attr.alloc_comp;
7389 cdecl = c->backend_decl;
7390 ctype = TREE_TYPE (cdecl);
7391
7392 switch (purpose)
7393 {
7394 case DEALLOCATE_ALLOC_COMP:
7395
7396 /* gfc_deallocate_scalar_with_status calls gfc_deallocate_alloc_comp
7397 (i.e. this function), so generate all the calls and suppress the
7398 recursion from here, if necessary. */
7399 called_dealloc_with_status = false;
7400 gfc_init_block (&tmpblock);
7401
7402 if (c->attr.allocatable
7403 && (c->attr.dimension || c->attr.codimension))
7404 {
7405 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7406 decl, cdecl, NULL_TREE);
7407 tmp = gfc_trans_dealloc_allocated (comp, c->attr.codimension);
7408 gfc_add_expr_to_block (&tmpblock, tmp);
7409 }
7410 else if (c->attr.allocatable)
7411 {
7412 /* Allocatable scalar components. */
7413 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7414 decl, cdecl, NULL_TREE);
7415
7416 tmp = gfc_deallocate_scalar_with_status (comp, NULL, true, NULL,
7417 c->ts);
7418 gfc_add_expr_to_block (&tmpblock, tmp);
7419 called_dealloc_with_status = true;
7420
7421 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
7422 void_type_node, comp,
7423 build_int_cst (TREE_TYPE (comp), 0));
7424 gfc_add_expr_to_block (&tmpblock, tmp);
7425 }
7426 else if (c->ts.type == BT_CLASS && CLASS_DATA (c)->attr.allocatable)
7427 {
7428 /* Allocatable CLASS components. */
7429 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7430 decl, cdecl, NULL_TREE);
7431
7432 /* Add reference to '_data' component. */
7433 tmp = CLASS_DATA (c)->backend_decl;
7434 comp = fold_build3_loc (input_location, COMPONENT_REF,
7435 TREE_TYPE (tmp), comp, tmp, NULL_TREE);
7436
7437 if (GFC_DESCRIPTOR_TYPE_P(TREE_TYPE (comp)))
7438 tmp = gfc_trans_dealloc_allocated (comp,
7439 CLASS_DATA (c)->attr.codimension);
7440 else
7441 {
7442 tmp = gfc_deallocate_scalar_with_status (comp, NULL, true, NULL,
7443 CLASS_DATA (c)->ts);
7444 gfc_add_expr_to_block (&tmpblock, tmp);
7445 called_dealloc_with_status = true;
7446
7447 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
7448 void_type_node, comp,
7449 build_int_cst (TREE_TYPE (comp), 0));
7450 }
7451 gfc_add_expr_to_block (&tmpblock, tmp);
7452 }
7453
7454 if (cmp_has_alloc_comps
7455 && !c->attr.pointer
7456 && !called_dealloc_with_status)
7457 {
7458 /* Do not deallocate the components of ultimate pointer
7459 components, nor recurse here if gfc_deallocate_scalar_with_status
7460 has already been called, since it handles the recursion itself. */
7461 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7462 decl, cdecl, NULL_TREE);
7463 rank = c->as ? c->as->rank : 0;
7464 tmp = structure_alloc_comps (c->ts.u.derived, comp, NULL_TREE,
7465 rank, purpose);
7466 gfc_add_expr_to_block (&fnblock, tmp);
7467 }
7468
7469 /* Now add the deallocation of this component. */
7470 gfc_add_block_to_block (&fnblock, &tmpblock);
7471 break;
7472
7473 case NULLIFY_ALLOC_COMP:
7474 if (c->attr.pointer)
7475 continue;
7476 else if (c->attr.allocatable
7477 && (c->attr.dimension || c->attr.codimension))
7478 {
7479 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7480 decl, cdecl, NULL_TREE);
7481 gfc_conv_descriptor_data_set (&fnblock, comp, null_pointer_node);
7482 }
7483 else if (c->attr.allocatable)
7484 {
7485 /* Allocatable scalar components. */
7486 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7487 decl, cdecl, NULL_TREE);
7488 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
7489 void_type_node, comp,
7490 build_int_cst (TREE_TYPE (comp), 0));
7491 gfc_add_expr_to_block (&fnblock, tmp);
7492 }
7493 else if (c->ts.type == BT_CLASS && CLASS_DATA (c)->attr.allocatable)
7494 {
7495 /* Allocatable CLASS components. */
7496 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7497 decl, cdecl, NULL_TREE);
7498 /* Add reference to '_data' component. */
7499 tmp = CLASS_DATA (c)->backend_decl;
7500 comp = fold_build3_loc (input_location, COMPONENT_REF,
7501 TREE_TYPE (tmp), comp, tmp, NULL_TREE);
7502 if (GFC_DESCRIPTOR_TYPE_P(TREE_TYPE (comp)))
7503 gfc_conv_descriptor_data_set (&fnblock, comp, null_pointer_node);
7504 else
7505 {
7506 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
7507 void_type_node, comp,
7508 build_int_cst (TREE_TYPE (comp), 0));
7509 gfc_add_expr_to_block (&fnblock, tmp);
7510 }
7511 }
7512 else if (cmp_has_alloc_comps)
7513 {
7514 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7515 decl, cdecl, NULL_TREE);
7516 rank = c->as ? c->as->rank : 0;
7517 tmp = structure_alloc_comps (c->ts.u.derived, comp, NULL_TREE,
7518 rank, purpose);
7519 gfc_add_expr_to_block (&fnblock, tmp);
7520 }
7521 break;
7522
7523 case COPY_ALLOC_COMP:
7524 if (c->attr.pointer)
7525 continue;
7526
7527 /* We need source and destination components. */
7528 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype, decl,
7529 cdecl, NULL_TREE);
7530 dcmp = fold_build3_loc (input_location, COMPONENT_REF, ctype, dest,
7531 cdecl, NULL_TREE);
7532 dcmp = fold_convert (TREE_TYPE (comp), dcmp);
7533
7534 if (c->ts.type == BT_CLASS && CLASS_DATA (c)->attr.allocatable)
7535 {
7536 tree ftn_tree;
7537 tree size;
7538 tree dst_data;
7539 tree src_data;
7540 tree null_data;
7541
7542 dst_data = gfc_class_data_get (dcmp);
7543 src_data = gfc_class_data_get (comp);
7544 size = fold_convert (size_type_node, gfc_vtable_size_get (comp));
7545
7546 if (CLASS_DATA (c)->attr.dimension)
7547 {
7548 nelems = gfc_conv_descriptor_size (src_data,
7549 CLASS_DATA (c)->as->rank);
7550 src_data = gfc_conv_descriptor_data_get (src_data);
7551 dst_data = gfc_conv_descriptor_data_get (dst_data);
7552 }
7553 else
7554 nelems = build_int_cst (size_type_node, 1);
7555
7556 gfc_init_block (&tmpblock);
7557
7558 /* We need to use CALLOC as _copy might try to free allocatable
7559 components of the destination. */
7560 ftn_tree = builtin_decl_explicit (BUILT_IN_CALLOC);
7561 tmp = build_call_expr_loc (input_location, ftn_tree, 2, nelems,
7562 size);
7563 gfc_add_modify (&tmpblock, dst_data,
7564 fold_convert (TREE_TYPE (dst_data), tmp));
7565
7566 tmp = gfc_copy_class_to_class (comp, dcmp, nelems);
7567 gfc_add_expr_to_block (&tmpblock, tmp);
7568 tmp = gfc_finish_block (&tmpblock);
7569
7570 gfc_init_block (&tmpblock);
7571 gfc_add_modify (&tmpblock, dst_data,
7572 fold_convert (TREE_TYPE (dst_data),
7573 null_pointer_node));
7574 null_data = gfc_finish_block (&tmpblock);
7575
7576 null_cond = fold_build2_loc (input_location, NE_EXPR,
7577 boolean_type_node, src_data,
7578 null_pointer_node);
7579
7580 gfc_add_expr_to_block (&fnblock, build3_v (COND_EXPR, null_cond,
7581 tmp, null_data));
7582 continue;
7583 }
7584
7585 if (c->attr.allocatable && !cmp_has_alloc_comps)
7586 {
7587 rank = c->as ? c->as->rank : 0;
7588 tmp = gfc_duplicate_allocatable (dcmp, comp, ctype, rank);
7589 gfc_add_expr_to_block (&fnblock, tmp);
7590 }
7591
7592 if (cmp_has_alloc_comps)
7593 {
7594 rank = c->as ? c->as->rank : 0;
7595 tmp = fold_convert (TREE_TYPE (dcmp), comp);
7596 gfc_add_modify (&fnblock, dcmp, tmp);
7597 tmp = structure_alloc_comps (c->ts.u.derived, comp, dcmp,
7598 rank, purpose);
7599 gfc_add_expr_to_block (&fnblock, tmp);
7600 }
7601 break;
7602
7603 default:
7604 gcc_unreachable ();
7605 break;
7606 }
7607 }
7608
7609 return gfc_finish_block (&fnblock);
7610 }
7611
7612 /* Recursively traverse an object of derived type, generating code to
7613 nullify allocatable components. */
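/* For instance, for a hypothetical declaration like

     type :: t
       integer, allocatable :: a(:)
     end type t
     type(t) :: x

   this generates code that sets the data pointer of x%a to NULL on entry,
   so that ALLOCATED (x%a) is initially false.  (Illustrative sketch only;
   the exact code depends on the component layout.)  */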
7614
7615 tree
7616 gfc_nullify_alloc_comp (gfc_symbol * der_type, tree decl, int rank)
7617 {
7618 return structure_alloc_comps (der_type, decl, NULL_TREE, rank,
7619 NULLIFY_ALLOC_COMP);
7620 }
7621
7622
7623 /* Recursively traverse an object of derived type, generating code to
7624 deallocate allocatable components. */
7625
7626 tree
7627 gfc_deallocate_alloc_comp (gfc_symbol * der_type, tree decl, int rank)
7628 {
7629 return structure_alloc_comps (der_type, decl, NULL_TREE, rank,
7630 DEALLOCATE_ALLOC_COMP);
7631 }
7632
7633
7634 /* Recursively traverse an object of derived type, generating code to
7635 copy it and its allocatable components. */
7636
7637 tree
7638 gfc_copy_alloc_comp (gfc_symbol * der_type, tree decl, tree dest, int rank)
7639 {
7640 return structure_alloc_comps (der_type, decl, dest, rank, COPY_ALLOC_COMP);
7641 }
7642
7643
7644 /* Recursively traverse an object of derived type, generating code to
7645 copy only its allocatable components. */
7646
7647 tree
7648 gfc_copy_only_alloc_comp (gfc_symbol * der_type, tree decl, tree dest, int rank)
7649 {
7650 return structure_alloc_comps (der_type, decl, dest, rank, COPY_ONLY_ALLOC_COMP);
7651 }
7652
7653
7654 /* Returns the value of LBOUND for an expression. This could be broken out
7655 from gfc_conv_intrinsic_bound but this seemed to be simpler. This is
7656 called by gfc_alloc_allocatable_for_assignment. */
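/* Per F2003 7.4.1.3 a reallocated lhs takes its lower bounds from
   LBOUND (expr); when no suitable bound information is available, the
   fallback below is a lower bound of one.  */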
7657 static tree
7658 get_std_lbound (gfc_expr *expr, tree desc, int dim, bool assumed_size)
7659 {
7660 tree lbound;
7661 tree ubound;
7662 tree stride;
7663 tree cond, cond1, cond3, cond4;
7664 tree tmp;
7665 gfc_ref *ref;
7666
7667 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)))
7668 {
7669 tmp = gfc_rank_cst[dim];
7670 lbound = gfc_conv_descriptor_lbound_get (desc, tmp);
7671 ubound = gfc_conv_descriptor_ubound_get (desc, tmp);
7672 stride = gfc_conv_descriptor_stride_get (desc, tmp);
7673 cond1 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
7674 ubound, lbound);
7675 cond3 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
7676 stride, gfc_index_zero_node);
7677 cond3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
7678 boolean_type_node, cond3, cond1);
7679 cond4 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
7680 stride, gfc_index_zero_node);
7681 if (assumed_size)
7682 cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
7683 tmp, build_int_cst (gfc_array_index_type,
7684 expr->rank - 1));
7685 else
7686 cond = boolean_false_node;
7687
7688 cond1 = fold_build2_loc (input_location, TRUTH_OR_EXPR,
7689 boolean_type_node, cond3, cond4);
7690 cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
7691 boolean_type_node, cond, cond1);
7692
7693 return fold_build3_loc (input_location, COND_EXPR,
7694 gfc_array_index_type, cond,
7695 lbound, gfc_index_one_node);
7696 }
7697
7698 if (expr->expr_type == EXPR_FUNCTION)
7699 {
7700 /* A conversion function, so use the argument. */
7701 gcc_assert (expr->value.function.isym
7702 && expr->value.function.isym->conversion);
7703 expr = expr->value.function.actual->expr;
7704 }
7705
7706 if (expr->expr_type == EXPR_VARIABLE)
7707 {
7708 tmp = TREE_TYPE (expr->symtree->n.sym->backend_decl);
7709 for (ref = expr->ref; ref; ref = ref->next)
7710 {
7711 if (ref->type == REF_COMPONENT
7712 && ref->u.c.component->as
7713 && ref->next
7714 && ref->next->u.ar.type == AR_FULL)
7715 tmp = TREE_TYPE (ref->u.c.component->backend_decl);
7716 }
7717 return GFC_TYPE_ARRAY_LBOUND(tmp, dim);
7718 }
7719
7720 return gfc_index_one_node;
7721 }
7722
7723
7724 /* Returns true if an expression represents an lhs that can be reallocated
7725 on assignment. */
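/* For example, given "integer, allocatable :: a(:)", the assignment
   "a = [1, 2, 3]" has a reallocatable lhs, as does "x%a = b" when a is an
   allocatable component; a section reference such as "a(1:n) = b" does
   not.  (Examples are illustrative only.)  */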
7726
7727 bool
7728 gfc_is_reallocatable_lhs (gfc_expr *expr)
7729 {
7730 gfc_ref * ref;
7731
7732 if (!expr->ref)
7733 return false;
7734
7735 /* An allocatable variable. */
7736 if (expr->symtree->n.sym->attr.allocatable
7737 && expr->ref
7738 && expr->ref->type == REF_ARRAY
7739 && expr->ref->u.ar.type == AR_FULL)
7740 return true;
7741
7742 /* All that can be left are allocatable components. */
7743 if ((expr->symtree->n.sym->ts.type != BT_DERIVED
7744 && expr->symtree->n.sym->ts.type != BT_CLASS)
7745 || !expr->symtree->n.sym->ts.u.derived->attr.alloc_comp)
7746 return false;
7747
7748 /* Find a component ref followed by an array reference. */
7749 for (ref = expr->ref; ref; ref = ref->next)
7750 if (ref->next
7751 && ref->type == REF_COMPONENT
7752 && ref->next->type == REF_ARRAY
7753 && !ref->next->next)
7754 break;
7755
7756 if (!ref)
7757 return false;
7758
7759 /* Return true if valid reallocatable lhs. */
7760 if (ref->u.c.component->attr.allocatable
7761 && ref->next->u.ar.type == AR_FULL)
7762 return true;
7763
7764 return false;
7765 }
7766
7767
7768 /* Allocate the lhs of an assignment to an allocatable array, otherwise
7769 reallocate it. */
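/* Roughly, the code generated below follows this outline (illustrative
   pseudo-code only):

     if (lhs.data == NULL) goto realloc;
     for each dimension n:
       if (extent of lhs in n != extent of rhs in n) goto realloc;
     goto done;
   realloc:
     set lhs bounds, strides and offset from the rhs shape;
     lhs.data = lhs.data == NULL
                ? malloc (new_size)
                : (old_size != new_size ? realloc (lhs.data, new_size)
                                        : lhs.data);
   done:
     ;  */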
7770
7771 tree
7772 gfc_alloc_allocatable_for_assignment (gfc_loopinfo *loop,
7773 gfc_expr *expr1,
7774 gfc_expr *expr2)
7775 {
7776 stmtblock_t realloc_block;
7777 stmtblock_t alloc_block;
7778 stmtblock_t fblock;
7779 gfc_ss *rss;
7780 gfc_ss *lss;
7781 gfc_array_info *linfo;
7782 tree realloc_expr;
7783 tree alloc_expr;
7784 tree size1;
7785 tree size2;
7786 tree array1;
7787 tree cond;
7788 tree tmp;
7789 tree tmp2;
7790 tree lbound;
7791 tree ubound;
7792 tree desc;
7793 tree desc2;
7794 tree offset;
7795 tree jump_label1;
7796 tree jump_label2;
7797 tree neq_size;
7798 tree lbd;
7799 int n;
7800 int dim;
7801 gfc_array_spec * as;
7802
7803 /* x = f(...) with x allocatable. In this case, expr1 is the rhs.
7804 Find the lhs expression in the loop chain and set expr1 and
7805 expr2 accordingly. */
7806 if (expr1->expr_type == EXPR_FUNCTION && expr2 == NULL)
7807 {
7808 expr2 = expr1;
7809 /* Find the ss for the lhs. */
7810 lss = loop->ss;
7811 for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
7812 if (lss->info->expr && lss->info->expr->expr_type == EXPR_VARIABLE)
7813 break;
7814 if (lss == gfc_ss_terminator)
7815 return NULL_TREE;
7816 expr1 = lss->info->expr;
7817 }
7818
7819 /* Bail out if this is not a valid allocate on assignment. */
7820 if (!gfc_is_reallocatable_lhs (expr1)
7821 || (expr2 && !expr2->rank))
7822 return NULL_TREE;
7823
7824 /* Find the ss for the lhs. */
7825 lss = loop->ss;
7826 for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
7827 if (lss->info->expr == expr1)
7828 break;
7829
7830 if (lss == gfc_ss_terminator)
7831 return NULL_TREE;
7832
7833 linfo = &lss->info->data.array;
7834
7835 /* Find an ss for the rhs. For operator expressions, we see the
7836 ss's for the operands. Any one of these will do. */
7837 rss = loop->ss;
7838 for (; rss && rss != gfc_ss_terminator; rss = rss->loop_chain)
7839 if (rss->info->expr != expr1 && rss != loop->temp_ss)
7840 break;
7841
7842 if (expr2 && rss == gfc_ss_terminator)
7843 return NULL_TREE;
7844
7845 gfc_start_block (&fblock);
7846
7847 /* Since the lhs is allocatable, this must be a descriptor type.
7848 Get the data and array size. */
7849 desc = linfo->descriptor;
7850 gcc_assert (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)));
7851 array1 = gfc_conv_descriptor_data_get (desc);
7852
7853 /* 7.4.1.3 "If variable is an allocated allocatable variable, it is
7854 deallocated if expr is an array of different shape or any of the
7855 corresponding length type parameter values of variable and expr
7856 differ." This assures F95 compatibility. */
7857 jump_label1 = gfc_build_label_decl (NULL_TREE);
7858 jump_label2 = gfc_build_label_decl (NULL_TREE);
7859
7860 /* Allocate if data is NULL. */
7861 cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
7862 array1, build_int_cst (TREE_TYPE (array1), 0));
7863 tmp = build3_v (COND_EXPR, cond,
7864 build1_v (GOTO_EXPR, jump_label1),
7865 build_empty_stmt (input_location));
7866 gfc_add_expr_to_block (&fblock, tmp);
7867
7868 /* Get arrayspec if expr is a full array. */
7869 if (expr2 && expr2->expr_type == EXPR_FUNCTION
7870 && expr2->value.function.isym
7871 && expr2->value.function.isym->conversion)
7872 {
7873 /* For conversion functions, take the arg. */
7874 gfc_expr *arg = expr2->value.function.actual->expr;
7875 as = gfc_get_full_arrayspec_from_expr (arg);
7876 }
7877 else if (expr2)
7878 as = gfc_get_full_arrayspec_from_expr (expr2);
7879 else
7880 as = NULL;
7881
7882 /* If the lhs shape is not the same as the rhs, jump to setting the
7883 bounds and doing the reallocation... */
7884 for (n = 0; n < expr1->rank; n++)
7885 {
7886 /* Check the shape. */
7887 lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
7888 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[n]);
7889 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7890 gfc_array_index_type,
7891 loop->to[n], loop->from[n]);
7892 tmp = fold_build2_loc (input_location, PLUS_EXPR,
7893 gfc_array_index_type,
7894 tmp, lbound);
7895 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7896 gfc_array_index_type,
7897 tmp, ubound);
7898 cond = fold_build2_loc (input_location, NE_EXPR,
7899 boolean_type_node,
7900 tmp, gfc_index_zero_node);
7901 tmp = build3_v (COND_EXPR, cond,
7902 build1_v (GOTO_EXPR, jump_label1),
7903 build_empty_stmt (input_location));
7904 gfc_add_expr_to_block (&fblock, tmp);
7905 }
7906
7907 /* ...else jump past the (re)alloc code. */
7908 tmp = build1_v (GOTO_EXPR, jump_label2);
7909 gfc_add_expr_to_block (&fblock, tmp);
7910
7911 /* Add the label to start automatic (re)allocation. */
7912 tmp = build1_v (LABEL_EXPR, jump_label1);
7913 gfc_add_expr_to_block (&fblock, tmp);
7914
7915 size1 = gfc_conv_descriptor_size (desc, expr1->rank);
7916
7917 /* Get the rhs size. Fix both sizes. */
7918 if (expr2)
7919 desc2 = rss->info->data.array.descriptor;
7920 else
7921 desc2 = NULL_TREE;
7922 size2 = gfc_index_one_node;
7923 for (n = 0; n < expr2->rank; n++)
7924 {
7925 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7926 gfc_array_index_type,
7927 loop->to[n], loop->from[n]);
7928 tmp = fold_build2_loc (input_location, PLUS_EXPR,
7929 gfc_array_index_type,
7930 tmp, gfc_index_one_node);
7931 size2 = fold_build2_loc (input_location, MULT_EXPR,
7932 gfc_array_index_type,
7933 tmp, size2);
7934 }
7935
7936 size1 = gfc_evaluate_now (size1, &fblock);
7937 size2 = gfc_evaluate_now (size2, &fblock);
7938
7939 cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
7940 size1, size2);
7941 neq_size = gfc_evaluate_now (cond, &fblock);
7942
7943
7944 /* Now modify the lhs descriptor and the associated scalarizer
7945 variables. F2003 7.4.1.3: "If variable is or becomes an
7946 unallocated allocatable variable, then it is allocated with each
7947 deferred type parameter equal to the corresponding type parameters
7948 of expr, with the shape of expr, and with each lower bound equal
7949 to the corresponding element of LBOUND(expr)."
7950 Reuse size1 to keep a dimension-by-dimension track of the
7951 stride of the new array. */
7952 size1 = gfc_index_one_node;
7953 offset = gfc_index_zero_node;
7954
7955 for (n = 0; n < expr2->rank; n++)
7956 {
7957 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7958 gfc_array_index_type,
7959 loop->to[n], loop->from[n]);
7960 tmp = fold_build2_loc (input_location, PLUS_EXPR,
7961 gfc_array_index_type,
7962 tmp, gfc_index_one_node);
7963
7964 lbound = gfc_index_one_node;
7965 ubound = tmp;
7966
7967 if (as)
7968 {
7969 lbd = get_std_lbound (expr2, desc2, n,
7970 as->type == AS_ASSUMED_SIZE);
7971 ubound = fold_build2_loc (input_location,
7972 MINUS_EXPR,
7973 gfc_array_index_type,
7974 ubound, lbound);
7975 ubound = fold_build2_loc (input_location,
7976 PLUS_EXPR,
7977 gfc_array_index_type,
7978 ubound, lbd);
7979 lbound = lbd;
7980 }
7981
7982 gfc_conv_descriptor_lbound_set (&fblock, desc,
7983 gfc_rank_cst[n],
7984 lbound);
7985 gfc_conv_descriptor_ubound_set (&fblock, desc,
7986 gfc_rank_cst[n],
7987 ubound);
7988 gfc_conv_descriptor_stride_set (&fblock, desc,
7989 gfc_rank_cst[n],
7990 size1);
7991 lbound = gfc_conv_descriptor_lbound_get (desc,
7992 gfc_rank_cst[n]);
7993 tmp2 = fold_build2_loc (input_location, MULT_EXPR,
7994 gfc_array_index_type,
7995 lbound, size1);
7996 offset = fold_build2_loc (input_location, MINUS_EXPR,
7997 gfc_array_index_type,
7998 offset, tmp2);
7999 size1 = fold_build2_loc (input_location, MULT_EXPR,
8000 gfc_array_index_type,
8001 tmp, size1);
8002 }
8003
8004 /* Set the lhs descriptor and scalarizer offsets. For rank > 1,
8005 the array offset is saved and the info.offset is used for a
8006 running offset. Use the saved_offset instead. */
8007 tmp = gfc_conv_descriptor_offset (desc);
8008 gfc_add_modify (&fblock, tmp, offset);
8009 if (linfo->saved_offset
8010 && TREE_CODE (linfo->saved_offset) == VAR_DECL)
8011 gfc_add_modify (&fblock, linfo->saved_offset, tmp);
8012
8013 /* Now set the deltas for the lhs. */
8014 for (n = 0; n < expr1->rank; n++)
8015 {
8016 tmp = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
8017 dim = lss->dim[n];
8018 tmp = fold_build2_loc (input_location, MINUS_EXPR,
8019 gfc_array_index_type, tmp,
8020 loop->from[dim]);
8021 if (linfo->delta[dim]
8022 && TREE_CODE (linfo->delta[dim]) == VAR_DECL)
8023 gfc_add_modify (&fblock, linfo->delta[dim], tmp);
8024 }
8025
8026 /* Get the new lhs size in bytes. */
8027 if (expr1->ts.type == BT_CHARACTER && expr1->ts.deferred)
8028 {
8029 tmp = expr2->ts.u.cl->backend_decl;
8030 gcc_assert (expr1->ts.u.cl->backend_decl);
8031 tmp = fold_convert (TREE_TYPE (expr1->ts.u.cl->backend_decl), tmp);
8032 gfc_add_modify (&fblock, expr1->ts.u.cl->backend_decl, tmp);
8033 }
8034 else if (expr1->ts.type == BT_CHARACTER && expr1->ts.u.cl->backend_decl)
8035 {
8036 tmp = TYPE_SIZE_UNIT (TREE_TYPE (gfc_typenode_for_spec (&expr1->ts)));
8037 tmp = fold_build2_loc (input_location, MULT_EXPR,
8038 gfc_array_index_type, tmp,
8039 expr1->ts.u.cl->backend_decl);
8040 }
8041 else
8042 tmp = TYPE_SIZE_UNIT (gfc_typenode_for_spec (&expr1->ts));
8043 tmp = fold_convert (gfc_array_index_type, tmp);
8044 size2 = fold_build2_loc (input_location, MULT_EXPR,
8045 gfc_array_index_type,
8046 tmp, size2);
8047 size2 = fold_convert (size_type_node, size2);
8048 size2 = gfc_evaluate_now (size2, &fblock);
8049
8050 /* Realloc expression. Note that the scalarizer uses desc.data
8051 in the array reference - (*desc.data)[<element>]. */
8052 gfc_init_block (&realloc_block);
8053 tmp = build_call_expr_loc (input_location,
8054 builtin_decl_explicit (BUILT_IN_REALLOC), 2,
8055 fold_convert (pvoid_type_node, array1),
8056 size2);
8057 gfc_conv_descriptor_data_set (&realloc_block,
8058 desc, tmp);
8059 realloc_expr = gfc_finish_block (&realloc_block);
8060
8061 /* Only reallocate if sizes are different. */
8062 tmp = build3_v (COND_EXPR, neq_size, realloc_expr,
8063 build_empty_stmt (input_location));
8064 realloc_expr = tmp;
8065
8066
8067 /* Malloc expression. */
8068 gfc_init_block (&alloc_block);
8069 tmp = build_call_expr_loc (input_location,
8070 builtin_decl_explicit (BUILT_IN_MALLOC),
8071 1, size2);
8072 gfc_conv_descriptor_data_set (&alloc_block,
8073 desc, tmp);
8074 tmp = gfc_conv_descriptor_dtype (desc);
8075 gfc_add_modify (&alloc_block, tmp, gfc_get_dtype (TREE_TYPE (desc)));
8076 alloc_expr = gfc_finish_block (&alloc_block);
8077
8078 /* Malloc if not allocated; realloc otherwise. */
8079 tmp = build_int_cst (TREE_TYPE (array1), 0);
8080 cond = fold_build2_loc (input_location, EQ_EXPR,
8081 boolean_type_node,
8082 array1, tmp);
8083 tmp = build3_v (COND_EXPR, cond, alloc_expr, realloc_expr);
8084 gfc_add_expr_to_block (&fblock, tmp);
8085
8086 /* Make sure that the scalarizer data pointer is updated. */
8087 if (linfo->data
8088 && TREE_CODE (linfo->data) == VAR_DECL)
8089 {
8090 tmp = gfc_conv_descriptor_data_get (desc);
8091 gfc_add_modify (&fblock, linfo->data, tmp);
8092 }
8093
8094 /* Add the exit label. */
8095 tmp = build1_v (LABEL_EXPR, jump_label2);
8096 gfc_add_expr_to_block (&fblock, tmp);
8097
8098 return gfc_finish_block (&fblock);
8099 }
8100
8101
8102 /* NULLIFY an allocatable/pointer array on function entry, free it on exit.
8103 Do likewise, recursively if necessary, with the allocatable components of
8104 derived types. */
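/* For a local variable declared, say, as "real, allocatable :: work(:)",
   this adds "work.data = NULL" to the procedure entry code and an
   automatic deallocation of work (and of any allocatable components)
   to the exit code, unless the variable is a dummy, a function result,
   use-associated or SAVEd.  (Illustrative summary only.)  */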
8105
8106 void
8107 gfc_trans_deferred_array (gfc_symbol * sym, gfc_wrapped_block * block)
8108 {
8109 tree type;
8110 tree tmp;
8111 tree descriptor;
8112 stmtblock_t init;
8113 stmtblock_t cleanup;
8114 locus loc;
8115 int rank;
8116 bool sym_has_alloc_comp;
8117
8118 sym_has_alloc_comp = (sym->ts.type == BT_DERIVED
8119 || sym->ts.type == BT_CLASS)
8120 && sym->ts.u.derived->attr.alloc_comp;
8121
8122 /* Make sure the frontend gets these right. */
8123 if (!(sym->attr.pointer || sym->attr.allocatable || sym_has_alloc_comp))
8124 fatal_error ("Possible front-end bug: Deferred array size without pointer, "
8125 "allocatable attribute or derived type without allocatable "
8126 "components.");
8127
8128 gfc_save_backend_locus (&loc);
8129 gfc_set_backend_locus (&sym->declared_at);
8130 gfc_init_block (&init);
8131
8132 gcc_assert (TREE_CODE (sym->backend_decl) == VAR_DECL
8133 || TREE_CODE (sym->backend_decl) == PARM_DECL);
8134
8135 if (sym->ts.type == BT_CHARACTER
8136 && !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
8137 {
8138 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
8139 gfc_trans_vla_type_sizes (sym, &init);
8140 }
8141
8142 /* Dummy, use-associated and result variables don't need anything special. */
8143 if (sym->attr.dummy || sym->attr.use_assoc || sym->attr.result)
8144 {
8145 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
8146 gfc_restore_backend_locus (&loc);
8147 return;
8148 }
8149
8150 descriptor = sym->backend_decl;
8151
8152 /* Although static, derived types with default initializers and
8153 allocatable components must not be nulled wholesale; instead they
8154 are treated component by component. */
8155 if (TREE_STATIC (descriptor) && !sym_has_alloc_comp)
8156 {
8157 /* SAVEd variables are not freed on exit. */
8158 gfc_trans_static_array_pointer (sym);
8159
8160 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
8161 gfc_restore_backend_locus (&loc);
8162 return;
8163 }
8164
8165 /* Get the descriptor type. */
8166 type = TREE_TYPE (sym->backend_decl);
8167
8168 if (sym_has_alloc_comp && !(sym->attr.pointer || sym->attr.allocatable))
8169 {
8170 if (!sym->attr.save
8171 && !(TREE_STATIC (sym->backend_decl) && sym->attr.is_main_program))
8172 {
8173 if (sym->value == NULL
8174 || !gfc_has_default_initializer (sym->ts.u.derived))
8175 {
8176 rank = sym->as ? sym->as->rank : 0;
8177 tmp = gfc_nullify_alloc_comp (sym->ts.u.derived,
8178 descriptor, rank);
8179 gfc_add_expr_to_block (&init, tmp);
8180 }
8181 else
8182 gfc_init_default_dt (sym, &init, false);
8183 }
8184 }
8185 else if (!GFC_DESCRIPTOR_TYPE_P (type))
8186 {
8187 /* If the backend_decl is not a descriptor, we must have a pointer
8188 to one. */
8189 descriptor = build_fold_indirect_ref_loc (input_location,
8190 sym->backend_decl);
8191 type = TREE_TYPE (descriptor);
8192 }
8193
8194 /* NULLIFY the data pointer. */
8195 if (GFC_DESCRIPTOR_TYPE_P (type) && !sym->attr.save)
8196 gfc_conv_descriptor_data_set (&init, descriptor, null_pointer_node);
8197
8198 gfc_restore_backend_locus (&loc);
8199 gfc_init_block (&cleanup);
8200
8201 /* Allocatable arrays need to be freed when they go out of scope.
8202 The allocatable components of pointers must not be touched. */
8203 if (sym_has_alloc_comp && !(sym->attr.function || sym->attr.result)
8204 && !sym->attr.pointer && !sym->attr.save)
8205 {
8206 int rank;
8207 rank = sym->as ? sym->as->rank : 0;
8208 tmp = gfc_deallocate_alloc_comp (sym->ts.u.derived, descriptor, rank);
8209 gfc_add_expr_to_block (&cleanup, tmp);
8210 }
8211
8212 if (sym->attr.allocatable && (sym->attr.dimension || sym->attr.codimension)
8213 && !sym->attr.save && !sym->attr.result)
8214 {
8215 tmp = gfc_trans_dealloc_allocated (sym->backend_decl,
8216 sym->attr.codimension);
8217 gfc_add_expr_to_block (&cleanup, tmp);
8218 }
8219
8220 gfc_add_init_cleanup (block, gfc_finish_block (&init),
8221 gfc_finish_block (&cleanup));
8222 }
8223
8224 /************ Expression Walking Functions ******************/
8225
8226 /* Walk a variable reference.
8227
8228 Possible extension - multiple component subscripts.
8229 x(:,:) = foo%a(:)%b(:)
8230 Transforms to
8231 forall (i=..., j=...)
8232 x(i,j) = foo%a(j)%b(i)
8233 end forall
8234 This adds a fair amount of complexity because you need to deal with more
8235 than one ref. Maybe handle in a similar manner to vector subscripts.
8236 Maybe not worth the effort. */
8237
8238
8239 static gfc_ss *
8240 gfc_walk_variable_expr (gfc_ss * ss, gfc_expr * expr)
8241 {
8242 gfc_ref *ref;
8243
8244 for (ref = expr->ref; ref; ref = ref->next)
8245 if (ref->type == REF_ARRAY && ref->u.ar.type != AR_ELEMENT)
8246 break;
8247
8248 return gfc_walk_array_ref (ss, expr, ref);
8249 }
8250
8251
8252 gfc_ss *
8253 gfc_walk_array_ref (gfc_ss * ss, gfc_expr * expr, gfc_ref * ref)
8254 {
8255 gfc_array_ref *ar;
8256 gfc_ss *newss;
8257 int n;
8258
8259 for (; ref; ref = ref->next)
8260 {
8261 if (ref->type == REF_SUBSTRING)
8262 {
8263 ss = gfc_get_scalar_ss (ss, ref->u.ss.start);
8264 ss = gfc_get_scalar_ss (ss, ref->u.ss.end);
8265 }
8266
8267 /* We're only interested in array sections from now on. */
8268 if (ref->type != REF_ARRAY)
8269 continue;
8270
8271 ar = &ref->u.ar;
8272
8273 switch (ar->type)
8274 {
8275 case AR_ELEMENT:
8276 for (n = ar->dimen - 1; n >= 0; n--)
8277 ss = gfc_get_scalar_ss (ss, ar->start[n]);
8278 break;
8279
8280 case AR_FULL:
8281 newss = gfc_get_array_ss (ss, expr, ar->as->rank, GFC_SS_SECTION);
8282 newss->info->data.array.ref = ref;
8283
8284 /* Make sure array is the same as array(:,:); this way
8285 we don't need to special-case all the time. */
8286 ar->dimen = ar->as->rank;
8287 for (n = 0; n < ar->dimen; n++)
8288 {
8289 ar->dimen_type[n] = DIMEN_RANGE;
8290
8291 gcc_assert (ar->start[n] == NULL);
8292 gcc_assert (ar->end[n] == NULL);
8293 gcc_assert (ar->stride[n] == NULL);
8294 }
8295 ss = newss;
8296 break;
8297
8298 case AR_SECTION:
8299 newss = gfc_get_array_ss (ss, expr, 0, GFC_SS_SECTION);
8300 newss->info->data.array.ref = ref;
8301
8302 /* We add SS chains for all the subscripts in the section. */
8303 for (n = 0; n < ar->dimen; n++)
8304 {
8305 gfc_ss *indexss;
8306
8307 switch (ar->dimen_type[n])
8308 {
8309 case DIMEN_ELEMENT:
8310 /* Add SS for elemental (scalar) subscripts. */
8311 gcc_assert (ar->start[n]);
8312 indexss = gfc_get_scalar_ss (gfc_ss_terminator, ar->start[n]);
8313 indexss->loop_chain = gfc_ss_terminator;
8314 newss->info->data.array.subscript[n] = indexss;
8315 break;
8316
8317 case DIMEN_RANGE:
8318 /* We don't add anything for sections; we just remember this
8319 dimension for later. */
8320 newss->dim[newss->dimen] = n;
8321 newss->dimen++;
8322 break;
8323
8324 case DIMEN_VECTOR:
8325 /* Create a GFC_SS_VECTOR index in which we can store
8326 the vector's descriptor. */
8327 indexss = gfc_get_array_ss (gfc_ss_terminator, ar->start[n],
8328 1, GFC_SS_VECTOR);
8329 indexss->loop_chain = gfc_ss_terminator;
8330 newss->info->data.array.subscript[n] = indexss;
8331 newss->dim[newss->dimen] = n;
8332 newss->dimen++;
8333 break;
8334
8335 default:
8336 /* We should know what sort of section it is by now. */
8337 gcc_unreachable ();
8338 }
8339 }
8340 /* We should have at least one non-elemental dimension,
8341 unless we are creating a descriptor for a (scalar) coarray. */
8342 gcc_assert (newss->dimen > 0
8343 || newss->info->data.array.ref->u.ar.as->corank > 0);
8344 ss = newss;
8345 break;
8346
8347 default:
8348 /* We should know what sort of section it is by now. */
8349 gcc_unreachable ();
8350 }
8351
8352 }
8353 return ss;
8354 }
8355
8356
8357 /* Walk an expression operator. If only one operand of a binary expression is
8358 scalar, we must also add the scalar term to the SS chain. */
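/* E.g. in "b + 1.0", with b an array, the walk of the constant operand
   returns the chain unchanged, so a scalar SS is created for it here and
   added to the chain alongside the array operand's SS.  */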
8359
8360 static gfc_ss *
8361 gfc_walk_op_expr (gfc_ss * ss, gfc_expr * expr)
8362 {
8363 gfc_ss *head;
8364 gfc_ss *head2;
8365
8366 head = gfc_walk_subexpr (ss, expr->value.op.op1);
8367 if (expr->value.op.op2 == NULL)
8368 head2 = head;
8369 else
8370 head2 = gfc_walk_subexpr (head, expr->value.op.op2);
8371
8372 /* All operands are scalar. Pass back and let the caller deal with it. */
8373 if (head2 == ss)
8374 return head2;
8375
8376 /* All operands require scalarization. */
8377 if (head != ss && (expr->value.op.op2 == NULL || head2 != head))
8378 return head2;
8379
8380 /* One of the operands needs scalarization; the other is scalar.
8381 Create a gfc_ss for the scalar expression. */
8382 if (head == ss)
8383 {
8384 /* First operand is scalar. We build the chain in reverse order, so
8385 add the scalar SS after the second operand. */
8386 head = head2;
8387 while (head && head->next != ss)
8388 head = head->next;
8389 /* Check we haven't somehow broken the chain. */
8390 gcc_assert (head);
8391 head->next = gfc_get_scalar_ss (ss, expr->value.op.op1);
8392 }
8393 else /* head2 == head */
8394 {
8395 gcc_assert (head2 == head);
8396 /* Second operand is scalar. */
8397 head2 = gfc_get_scalar_ss (head2, expr->value.op.op2);
8398 }
8399
8400 return head2;
8401 }
8402
8403
8404 /* Reverse a SS chain. */
8405
8406 gfc_ss *
8407 gfc_reverse_ss (gfc_ss * ss)
8408 {
8409 gfc_ss *next;
8410 gfc_ss *head;
8411
8412 gcc_assert (ss != NULL);
8413
8414 head = gfc_ss_terminator;
8415 while (ss != gfc_ss_terminator)
8416 {
8417 next = ss->next;
8418 /* Check we didn't somehow break the chain. */
8419 gcc_assert (next != NULL);
8420 ss->next = head;
8421 head = ss;
8422 ss = next;
8423 }
8424
8425 return (head);
8426 }
8427
8428
8429 /* Given an expression referring to a procedure, return the symbol of its
8430 interface. We can't get the procedure symbol directly as we have to handle
8431 the case of (deferred) type-bound procedures. */
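/* E.g. for a call through a procedure-pointer component, "x%p (args)",
   the interface symbol of p is returned rather than the symbol of x.  */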
8432
8433 gfc_symbol *
8434 gfc_get_proc_ifc_for_expr (gfc_expr *procedure_ref)
8435 {
8436 gfc_symbol *sym;
8437 gfc_ref *ref;
8438
8439 if (procedure_ref == NULL)
8440 return NULL;
8441
8442 /* Normal procedure case. */
8443 sym = procedure_ref->symtree->n.sym;
8444
8445 /* Typebound procedure case. */
8446 for (ref = procedure_ref->ref; ref; ref = ref->next)
8447 {
8448 if (ref->type == REF_COMPONENT
8449 && ref->u.c.component->attr.proc_pointer)
8450 sym = ref->u.c.component->ts.interface;
8451 else
8452 sym = NULL;
8453 }
8454
8455 return sym;
8456 }
8457
8458
8459 /* Walk the arguments of an elemental function.
8460 PROC_IFC is used to check whether an argument is permitted to be absent. If
8461 it is NULL, we don't do the check and the argument is assumed to be present.
8462 */
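/* For instance, for an elemental function f referenced as "y = f (x, 1)"
   with x an array, an array SS is built for x and a scalar SS (of the
   requested type) for the constant argument; if every argument is scalar,
   the incoming chain is returned untouched.  */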
8463
8464 gfc_ss *
8465 gfc_walk_elemental_function_args (gfc_ss * ss, gfc_actual_arglist *arg,
8466 gfc_symbol *proc_ifc, gfc_ss_type type)
8467 {
8468 gfc_formal_arglist *dummy_arg;
8469 int scalar;
8470 gfc_ss *head;
8471 gfc_ss *tail;
8472 gfc_ss *newss;
8473
8474 head = gfc_ss_terminator;
8475 tail = NULL;
8476
8477 if (proc_ifc)
8478 dummy_arg = proc_ifc->formal;
8479 else
8480 dummy_arg = NULL;
8481
8482 scalar = 1;
8483 for (; arg; arg = arg->next)
8484 {
8485 if (!arg->expr || arg->expr->expr_type == EXPR_NULL)
8486 continue;
8487
8488 newss = gfc_walk_subexpr (head, arg->expr);
8489 if (newss == head)
8490 {
8491 /* Scalar argument. */
8492 gcc_assert (type == GFC_SS_SCALAR || type == GFC_SS_REFERENCE);
8493 newss = gfc_get_scalar_ss (head, arg->expr);
8494 newss->info->type = type;
8495
8496 }
8497 else
8498 scalar = 0;
8499
8500 if (dummy_arg != NULL
8501 && dummy_arg->sym->attr.optional
8502 && arg->expr->expr_type == EXPR_VARIABLE
8503 && (gfc_expr_attr (arg->expr).optional
8504 || gfc_expr_attr (arg->expr).allocatable
8505 || gfc_expr_attr (arg->expr).pointer))
8506 newss->info->can_be_null_ref = true;
8507
8508 head = newss;
8509 if (!tail)
8510 {
8511 tail = head;
8512 while (tail->next != gfc_ss_terminator)
8513 tail = tail->next;
8514 }
8515
8516 if (dummy_arg != NULL)
8517 dummy_arg = dummy_arg->next;
8518 }
8519
8520 if (scalar)
8521 {
8522 /* If all the arguments are scalar we don't need the argument SS. */
8523 gfc_free_ss_chain (head);
8524 /* Pass it back. */
8525 return ss;
8526 }
8527
8528 /* Add it onto the existing chain. */
8529 tail->next = ss;
8530 return head;
8531 }
8532
8533
8534 /* Walk a function call. Scalar functions are passed back, and taken out of
8535 scalarization loops. For elemental functions we walk their arguments.
8536 The result of functions returning arrays is stored in a temporary outside
8537 the loop, so that the function is only called once. Hence we do not need
8538 to walk their arguments. */
8539
8540 static gfc_ss *
8541 gfc_walk_function_expr (gfc_ss * ss, gfc_expr * expr)
8542 {
8543 gfc_intrinsic_sym *isym;
8544 gfc_symbol *sym;
8545 gfc_component *comp = NULL;
8546
8547 isym = expr->value.function.isym;
8548
8549 /* Handle intrinsic functions separately. */
8550 if (isym)
8551 return gfc_walk_intrinsic_function (ss, expr, isym);
8552
8553 sym = expr->value.function.esym;
8554 if (!sym)
8555 sym = expr->symtree->n.sym;
8556
8557 /* A function that returns arrays. */
8558 gfc_is_proc_ptr_comp (expr, &comp);
8559 if ((!comp && gfc_return_by_reference (sym) && sym->result->attr.dimension)
8560 || (comp && comp->attr.dimension))
8561 return gfc_get_array_ss (ss, expr, expr->rank, GFC_SS_FUNCTION);
8562
8563 /* Walk the parameters of an elemental function. For now we always pass
8564 by reference. */
8565 if (sym->attr.elemental || (comp && comp->attr.elemental))
8566 return gfc_walk_elemental_function_args (ss, expr->value.function.actual,
8567 gfc_get_proc_ifc_for_expr (expr),
8568 GFC_SS_REFERENCE);
8569
8570 /* Scalar functions are OK as these are evaluated outside the scalarization
8571 loop. Pass back and let the caller deal with it. */
8572 return ss;
8573 }
8574
8575
8576 /* An array temporary is constructed for array constructors. */
8577
8578 static gfc_ss *
8579 gfc_walk_array_constructor (gfc_ss * ss, gfc_expr * expr)
8580 {
8581 return gfc_get_array_ss (ss, expr, expr->rank, GFC_SS_CONSTRUCTOR);
8582 }
8583
8584
8585 /* Walk an expression. Add walked expressions to the head of the SS chain.
8586 A wholly scalar expression will not be added. */
8587
8588 gfc_ss *
8589 gfc_walk_subexpr (gfc_ss * ss, gfc_expr * expr)
8590 {
8591 gfc_ss *head;
8592
8593 switch (expr->expr_type)
8594 {
8595 case EXPR_VARIABLE:
8596 head = gfc_walk_variable_expr (ss, expr);
8597 return head;
8598
8599 case EXPR_OP:
8600 head = gfc_walk_op_expr (ss, expr);
8601 return head;
8602
8603 case EXPR_FUNCTION:
8604 head = gfc_walk_function_expr (ss, expr);
8605 return head;
8606
8607 case EXPR_CONSTANT:
8608 case EXPR_NULL:
8609 case EXPR_STRUCTURE:
8610 /* Pass back and let the caller deal with it. */
8611 break;
8612
8613 case EXPR_ARRAY:
8614 head = gfc_walk_array_constructor (ss, expr);
8615 return head;
8616
8617 case EXPR_SUBSTRING:
8618 /* Pass back and let the caller deal with it. */
8619 break;
8620
8621 default:
8622 internal_error ("bad expression type during walk (%d)",
8623 expr->expr_type);
8624 }
8625 return ss;
8626 }
8627
8628
8629 /* Entry point for expression walking.
8630 A return value equal to the passed chain means this is
8631 a scalar expression. It is up to the caller to take whatever action is
8632 necessary to translate these. */
8633
8634 gfc_ss *
8635 gfc_walk_expr (gfc_expr * expr)
8636 {
8637 gfc_ss *res;
8638
8639 res = gfc_walk_subexpr (gfc_ss_terminator, expr);
8640 return gfc_reverse_ss (res);
8641 }