gcc/fortran/trans-array.c
1 /* Array translation routines
2 Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
3 2011
4 Free Software Foundation, Inc.
5 Contributed by Paul Brook <paul@nowt.org>
6 and Steven Bosscher <s.bosscher@student.tudelft.nl>
7
8 This file is part of GCC.
9
10 GCC is free software; you can redistribute it and/or modify it under
11 the terms of the GNU General Public License as published by the Free
12 Software Foundation; either version 3, or (at your option) any later
13 version.
14
15 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
16 WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
23
24 /* trans-array.c-- Various array related code, including scalarization,
25 allocation, initialization and other support routines. */
26
27 /* How the scalarizer works.
28 In gfortran, array expressions use the same core routines as scalar
29 expressions.
30 First, a Scalarization State (SS) chain is built. This is done by walking
31 the expression tree, and building a linear list of the terms in the
32 expression. As the tree is walked, scalar subexpressions are translated.
33
34 The scalarization parameters are stored in a gfc_loopinfo structure.
35 First the start and stride of each term is calculated by
36 gfc_conv_ss_startstride. During this process the expressions for the array
37 descriptors and data pointers are also translated.
38
39 If the expression is an assignment, we must then resolve any dependencies.
 40 In Fortran all the RHS values of an assignment must be evaluated before
41 any assignments take place. This can require a temporary array to store the
42 values. We also require a temporary when we are passing array expressions
43 or vector subscripts as procedure parameters.
44
45 Array sections are passed without copying to a temporary. These use the
46 scalarizer to determine the shape of the section. The flag
47 loop->array_parameter tells the scalarizer that the actual values and loop
48 variables will not be required.
49
50 The function gfc_conv_loop_setup generates the scalarization setup code.
51 It determines the range of the scalarizing loop variables. If a temporary
52 is required, this is created and initialized. Code for scalar expressions
53 taken outside the loop is also generated at this time. Next the offset and
54 scaling required to translate from loop variables to array indices for each
55 term is calculated.
56
57 A call to gfc_start_scalarized_body marks the start of the scalarized
58 expression. This creates a scope and declares the loop variables. Before
 59 calling this, gfc_mark_ss_chain_used must be used to indicate which terms
60 will be used inside this loop.
61
62 The scalar gfc_conv_* functions are then used to build the main body of the
63 scalarization loop. Scalarization loop variables and precalculated scalar
64 values are automatically substituted. Note that gfc_advance_se_ss_chain
65 must be used, rather than changing the se->ss directly.
66
 67 For assignment expressions requiring a temporary, two sub-loops are
68 generated. The first stores the result of the expression in the temporary,
69 the second copies it to the result. A call to
70 gfc_trans_scalarized_loop_boundary marks the end of the main loop code and
71 the start of the copying loop. The temporary may be less than full rank.
72
73 Finally gfc_trans_scalarizing_loops is called to generate the implicit do
74 loops. The loops are added to the pre chain of the loopinfo. The post
75 chain may still contain cleanup code.
76
77 After the loop code has been added into its parent scope gfc_cleanup_loop
78 is called to free all the SS allocated by the scalarizer. */
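
/* A minimal sketch of the typical call sequence described above, as used
   for instance by gfc_trans_array_constructor_subarray later in this file.
   The entry points are real; the surrounding variables (expr, loop, ss,
   body) are illustrative only:

     ss = gfc_walk_expr (expr);                   build the SS chain
     gfc_init_loopinfo (&loop);
     gfc_add_ss_to_loop (&loop, ss);              associate chain and loop
     gfc_conv_ss_startstride (&loop);             starts, strides, descriptors
     gfc_conv_loop_setup (&loop, &expr->where);   loop bounds, temporaries
     gfc_mark_ss_chain_used (ss, 1);              used in the main loop body
     gfc_start_scalarized_body (&loop, &body);
       ... per-element code built with the scalar gfc_conv_* functions ...
     gfc_trans_scalarizing_loops (&loop, &body);  emit the implicit DO loops
     gfc_cleanup_loop (&loop);                    free the SS chain
   */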
79
80 #include "config.h"
81 #include "system.h"
82 #include "coretypes.h"
83 #include "tree.h"
84 #include "gimple.h"
85 #include "diagnostic-core.h" /* For internal_error/fatal_error. */
86 #include "flags.h"
87 #include "gfortran.h"
88 #include "constructor.h"
89 #include "trans.h"
90 #include "trans-stmt.h"
91 #include "trans-types.h"
92 #include "trans-array.h"
93 #include "trans-const.h"
94 #include "dependency.h"
95
96 static bool gfc_get_array_constructor_size (mpz_t *, gfc_constructor_base);
97
98 /* The contents of this structure aren't actually used, just the address. */
99 static gfc_ss gfc_ss_terminator_var;
100 gfc_ss * const gfc_ss_terminator = &gfc_ss_terminator_var;
101
102
103 static tree
104 gfc_array_dataptr_type (tree desc)
105 {
106 return (GFC_TYPE_ARRAY_DATAPTR_TYPE (TREE_TYPE (desc)));
107 }
108
109
110 /* Build expressions to access the members of an array descriptor.
111 It's surprisingly easy to mess up here, so never access
112 an array descriptor by "brute force", always use these
113 functions. This also avoids problems if we change the format
114 of an array descriptor.
115
116 To understand these magic numbers, look at the comments
117 before gfc_build_array_type() in trans-types.c.
118
119 The code within these defines should be the only code which knows the format
120 of an array descriptor.
121
 122 Any code that just needs to obtain the bounds of an array should use
 123 gfc_conv_array_* rather than the following functions, as those will return
 124 known constant values and work with arrays which do not have descriptors.
125
126 Don't forget to #undef these! */
127
128 #define DATA_FIELD 0
129 #define OFFSET_FIELD 1
130 #define DTYPE_FIELD 2
131 #define DIMENSION_FIELD 3
132 #define CAF_TOKEN_FIELD 4
133
134 #define STRIDE_SUBFIELD 0
135 #define LBOUND_SUBFIELD 1
136 #define UBOUND_SUBFIELD 2
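
/* As a rough illustration (assuming the descriptor layout documented in
   trans-types.c), the accessors defined below map onto the descriptor
   fields like this:

     gfc_conv_descriptor_data_get (desc)                     desc.data
     gfc_conv_descriptor_offset_get (desc)                   desc.offset
     gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n])  desc.dim[n].lbound
     gfc_conv_descriptor_stride_get (desc, gfc_rank_cst[n])  desc.dim[n].stride

   The corresponding _set variants emit the assignment into a given
   stmtblock_t; see gfc_conv_shift_descriptor_lbound below for a real use.  */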
137
138 /* This provides READ-ONLY access to the data field. The field itself
139 doesn't have the proper type. */
140
141 tree
142 gfc_conv_descriptor_data_get (tree desc)
143 {
144 tree field, type, t;
145
146 type = TREE_TYPE (desc);
147 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
148
149 field = TYPE_FIELDS (type);
150 gcc_assert (DATA_FIELD == 0);
151
152 t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
153 field, NULL_TREE);
154 t = fold_convert (GFC_TYPE_ARRAY_DATAPTR_TYPE (type), t);
155
156 return t;
157 }
158
 159 /* This provides WRITE access to the data field.
 160
 161 This function gets called through the following macro:
 162 gfc_conv_descriptor_data_set. */
166
167 void
168 gfc_conv_descriptor_data_set (stmtblock_t *block, tree desc, tree value)
169 {
170 tree field, type, t;
171
172 type = TREE_TYPE (desc);
173 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
174
175 field = TYPE_FIELDS (type);
176 gcc_assert (DATA_FIELD == 0);
177
178 t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
179 field, NULL_TREE);
180 gfc_add_modify (block, t, fold_convert (TREE_TYPE (field), value));
181 }
182
183
 184 /* This provides access to the address of the data field. It should only be
 185 used by array allocation, which passes that address on to the runtime. */
186
187 tree
188 gfc_conv_descriptor_data_addr (tree desc)
189 {
190 tree field, type, t;
191
192 type = TREE_TYPE (desc);
193 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
194
195 field = TYPE_FIELDS (type);
196 gcc_assert (DATA_FIELD == 0);
197
198 t = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field), desc,
199 field, NULL_TREE);
200 return gfc_build_addr_expr (NULL_TREE, t);
201 }
202
203 static tree
204 gfc_conv_descriptor_offset (tree desc)
205 {
206 tree type;
207 tree field;
208
209 type = TREE_TYPE (desc);
210 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
211
212 field = gfc_advance_chain (TYPE_FIELDS (type), OFFSET_FIELD);
213 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
214
215 return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
216 desc, field, NULL_TREE);
217 }
218
219 tree
220 gfc_conv_descriptor_offset_get (tree desc)
221 {
222 return gfc_conv_descriptor_offset (desc);
223 }
224
225 void
226 gfc_conv_descriptor_offset_set (stmtblock_t *block, tree desc,
227 tree value)
228 {
229 tree t = gfc_conv_descriptor_offset (desc);
230 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
231 }
232
233
234 tree
235 gfc_conv_descriptor_dtype (tree desc)
236 {
237 tree field;
238 tree type;
239
240 type = TREE_TYPE (desc);
241 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
242
243 field = gfc_advance_chain (TYPE_FIELDS (type), DTYPE_FIELD);
244 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
245
246 return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
247 desc, field, NULL_TREE);
248 }
249
250 static tree
251 gfc_conv_descriptor_dimension (tree desc, tree dim)
252 {
253 tree field;
254 tree type;
255 tree tmp;
256
257 type = TREE_TYPE (desc);
258 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
259
260 field = gfc_advance_chain (TYPE_FIELDS (type), DIMENSION_FIELD);
261 gcc_assert (field != NULL_TREE
262 && TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE
263 && TREE_CODE (TREE_TYPE (TREE_TYPE (field))) == RECORD_TYPE);
264
265 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
266 desc, field, NULL_TREE);
267 tmp = gfc_build_array_ref (tmp, dim, NULL);
268 return tmp;
269 }
270
271
272 tree
273 gfc_conv_descriptor_token (tree desc)
274 {
275 tree type;
276 tree field;
277
278 type = TREE_TYPE (desc);
279 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
280 gcc_assert (GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ALLOCATABLE);
281 gcc_assert (gfc_option.coarray == GFC_FCOARRAY_LIB);
282 field = gfc_advance_chain (TYPE_FIELDS (type), CAF_TOKEN_FIELD);
283 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == prvoid_type_node);
284
285 return fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
286 desc, field, NULL_TREE);
287 }
288
289
290 static tree
291 gfc_conv_descriptor_stride (tree desc, tree dim)
292 {
293 tree tmp;
294 tree field;
295
296 tmp = gfc_conv_descriptor_dimension (desc, dim);
297 field = TYPE_FIELDS (TREE_TYPE (tmp));
298 field = gfc_advance_chain (field, STRIDE_SUBFIELD);
299 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
300
301 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
302 tmp, field, NULL_TREE);
303 return tmp;
304 }
305
306 tree
307 gfc_conv_descriptor_stride_get (tree desc, tree dim)
308 {
309 tree type = TREE_TYPE (desc);
310 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
311 if (integer_zerop (dim)
312 && (GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ALLOCATABLE
 313 || GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_ASSUMED_SHAPE_CONT
 314 || GFC_TYPE_ARRAY_AKIND (type) == GFC_ARRAY_POINTER_CONT))
315 return gfc_index_one_node;
316
317 return gfc_conv_descriptor_stride (desc, dim);
318 }
319
320 void
321 gfc_conv_descriptor_stride_set (stmtblock_t *block, tree desc,
322 tree dim, tree value)
323 {
324 tree t = gfc_conv_descriptor_stride (desc, dim);
325 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
326 }
327
328 static tree
329 gfc_conv_descriptor_lbound (tree desc, tree dim)
330 {
331 tree tmp;
332 tree field;
333
334 tmp = gfc_conv_descriptor_dimension (desc, dim);
335 field = TYPE_FIELDS (TREE_TYPE (tmp));
336 field = gfc_advance_chain (field, LBOUND_SUBFIELD);
337 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
338
339 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
340 tmp, field, NULL_TREE);
341 return tmp;
342 }
343
344 tree
345 gfc_conv_descriptor_lbound_get (tree desc, tree dim)
346 {
347 return gfc_conv_descriptor_lbound (desc, dim);
348 }
349
350 void
351 gfc_conv_descriptor_lbound_set (stmtblock_t *block, tree desc,
352 tree dim, tree value)
353 {
354 tree t = gfc_conv_descriptor_lbound (desc, dim);
355 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
356 }
357
358 static tree
359 gfc_conv_descriptor_ubound (tree desc, tree dim)
360 {
361 tree tmp;
362 tree field;
363
364 tmp = gfc_conv_descriptor_dimension (desc, dim);
365 field = TYPE_FIELDS (TREE_TYPE (tmp));
366 field = gfc_advance_chain (field, UBOUND_SUBFIELD);
367 gcc_assert (field != NULL_TREE && TREE_TYPE (field) == gfc_array_index_type);
368
369 tmp = fold_build3_loc (input_location, COMPONENT_REF, TREE_TYPE (field),
370 tmp, field, NULL_TREE);
371 return tmp;
372 }
373
374 tree
375 gfc_conv_descriptor_ubound_get (tree desc, tree dim)
376 {
377 return gfc_conv_descriptor_ubound (desc, dim);
378 }
379
380 void
381 gfc_conv_descriptor_ubound_set (stmtblock_t *block, tree desc,
382 tree dim, tree value)
383 {
384 tree t = gfc_conv_descriptor_ubound (desc, dim);
385 gfc_add_modify (block, t, fold_convert (TREE_TYPE (t), value));
386 }
387
388 /* Build a null array descriptor constructor. */
389
390 tree
391 gfc_build_null_descriptor (tree type)
392 {
393 tree field;
394 tree tmp;
395
396 gcc_assert (GFC_DESCRIPTOR_TYPE_P (type));
397 gcc_assert (DATA_FIELD == 0);
398 field = TYPE_FIELDS (type);
399
400 /* Set a NULL data pointer. */
401 tmp = build_constructor_single (type, field, null_pointer_node);
402 TREE_CONSTANT (tmp) = 1;
403 /* All other fields are ignored. */
404
405 return tmp;
406 }
407
408
409 /* Modify a descriptor such that the lbound of a given dimension is the value
410 specified. This also updates ubound and offset accordingly. */
411
412 void
413 gfc_conv_shift_descriptor_lbound (stmtblock_t* block, tree desc,
414 int dim, tree new_lbound)
415 {
416 tree offs, ubound, lbound, stride;
417 tree diff, offs_diff;
418
419 new_lbound = fold_convert (gfc_array_index_type, new_lbound);
420
421 offs = gfc_conv_descriptor_offset_get (desc);
422 lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]);
423 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]);
424 stride = gfc_conv_descriptor_stride_get (desc, gfc_rank_cst[dim]);
425
426 /* Get difference (new - old) by which to shift stuff. */
427 diff = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
428 new_lbound, lbound);
429
430 /* Shift ubound and offset accordingly. This has to be done before
431 updating the lbound, as they depend on the lbound expression! */
432 ubound = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
433 ubound, diff);
434 gfc_conv_descriptor_ubound_set (block, desc, gfc_rank_cst[dim], ubound);
435 offs_diff = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
436 diff, stride);
437 offs = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
438 offs, offs_diff);
439 gfc_conv_descriptor_offset_set (block, desc, offs);
440
441 /* Finally set lbound to value we want. */
442 gfc_conv_descriptor_lbound_set (block, desc, gfc_rank_cst[dim], new_lbound);
443 }
444
445
446 /* Cleanup those #defines. */
447
448 #undef DATA_FIELD
449 #undef OFFSET_FIELD
450 #undef DTYPE_FIELD
451 #undef DIMENSION_FIELD
452 #undef CAF_TOKEN_FIELD
453 #undef STRIDE_SUBFIELD
454 #undef LBOUND_SUBFIELD
455 #undef UBOUND_SUBFIELD
456
457
458 /* Mark a SS chain as used. Flags specifies in which loops the SS is used.
459 flags & 1 = Main loop body.
460 flags & 2 = temp copy loop. */
461
462 void
463 gfc_mark_ss_chain_used (gfc_ss * ss, unsigned flags)
464 {
465 for (; ss != gfc_ss_terminator; ss = ss->next)
466 ss->info->useflags = flags;
467 }
468
469 static void gfc_free_ss (gfc_ss *);
470
471
472 /* Free a gfc_ss chain. */
473
474 void
475 gfc_free_ss_chain (gfc_ss * ss)
476 {
477 gfc_ss *next;
478
479 while (ss != gfc_ss_terminator)
480 {
481 gcc_assert (ss != NULL);
482 next = ss->next;
483 gfc_free_ss (ss);
484 ss = next;
485 }
486 }
487
488
489 static void
490 free_ss_info (gfc_ss_info *ss_info)
491 {
492 ss_info->refcount--;
493 if (ss_info->refcount > 0)
494 return;
495
496 gcc_assert (ss_info->refcount == 0);
497 free (ss_info);
498 }
499
500
501 /* Free a SS. */
502
503 static void
504 gfc_free_ss (gfc_ss * ss)
505 {
506 gfc_ss_info *ss_info;
507 int n;
508
509 ss_info = ss->info;
510
511 switch (ss_info->type)
512 {
513 case GFC_SS_SECTION:
514 for (n = 0; n < ss->dimen; n++)
515 {
516 if (ss_info->data.array.subscript[ss->dim[n]])
517 gfc_free_ss_chain (ss_info->data.array.subscript[ss->dim[n]]);
518 }
519 break;
520
521 default:
522 break;
523 }
524
525 free_ss_info (ss_info);
526 free (ss);
527 }
528
529
530 /* Creates and initializes an array type gfc_ss struct. */
531
532 gfc_ss *
533 gfc_get_array_ss (gfc_ss *next, gfc_expr *expr, int dimen, gfc_ss_type type)
534 {
535 gfc_ss *ss;
536 gfc_ss_info *ss_info;
537 int i;
538
539 ss_info = gfc_get_ss_info ();
540 ss_info->refcount++;
541 ss_info->type = type;
542 ss_info->expr = expr;
543
544 ss = gfc_get_ss ();
545 ss->info = ss_info;
546 ss->next = next;
547 ss->dimen = dimen;
548 for (i = 0; i < ss->dimen; i++)
549 ss->dim[i] = i;
550
551 return ss;
552 }
553
554
555 /* Creates and initializes a temporary type gfc_ss struct. */
556
557 gfc_ss *
558 gfc_get_temp_ss (tree type, tree string_length, int dimen)
559 {
560 gfc_ss *ss;
561 gfc_ss_info *ss_info;
562 int i;
563
564 ss_info = gfc_get_ss_info ();
565 ss_info->refcount++;
566 ss_info->type = GFC_SS_TEMP;
567 ss_info->string_length = string_length;
568 ss_info->data.temp.type = type;
569
570 ss = gfc_get_ss ();
571 ss->info = ss_info;
572 ss->next = gfc_ss_terminator;
573 ss->dimen = dimen;
574 for (i = 0; i < ss->dimen; i++)
575 ss->dim[i] = i;
576
577 return ss;
578 }
579
580
581 /* Creates and initializes a scalar type gfc_ss struct. */
582
583 gfc_ss *
584 gfc_get_scalar_ss (gfc_ss *next, gfc_expr *expr)
585 {
586 gfc_ss *ss;
587 gfc_ss_info *ss_info;
588
589 ss_info = gfc_get_ss_info ();
590 ss_info->refcount++;
591 ss_info->type = GFC_SS_SCALAR;
592 ss_info->expr = expr;
593
594 ss = gfc_get_ss ();
595 ss->info = ss_info;
596 ss->next = next;
597
598 return ss;
599 }
600
601
602 /* Free all the SS associated with a loop. */
603
604 void
605 gfc_cleanup_loop (gfc_loopinfo * loop)
606 {
607 gfc_loopinfo *loop_next, **ploop;
608 gfc_ss *ss;
609 gfc_ss *next;
610
611 ss = loop->ss;
612 while (ss != gfc_ss_terminator)
613 {
614 gcc_assert (ss != NULL);
615 next = ss->loop_chain;
616 gfc_free_ss (ss);
617 ss = next;
618 }
619
620 /* Remove reference to self in the parent loop. */
621 if (loop->parent)
622 for (ploop = &loop->parent->nested; *ploop; ploop = &(*ploop)->next)
623 if (*ploop == loop)
624 {
625 *ploop = loop->next;
626 break;
627 }
628
629 /* Free non-freed nested loops. */
630 for (loop = loop->nested; loop; loop = loop_next)
631 {
632 loop_next = loop->next;
633 gfc_cleanup_loop (loop);
634 free (loop);
635 }
636 }
637
638
639 static void
640 set_ss_loop (gfc_ss *ss, gfc_loopinfo *loop)
641 {
642 int n;
643
644 for (; ss != gfc_ss_terminator; ss = ss->next)
645 {
646 ss->loop = loop;
647
648 if (ss->info->type == GFC_SS_SCALAR
649 || ss->info->type == GFC_SS_REFERENCE
650 || ss->info->type == GFC_SS_TEMP)
651 continue;
652
653 for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
654 if (ss->info->data.array.subscript[n] != NULL)
655 set_ss_loop (ss->info->data.array.subscript[n], loop);
656 }
657 }
658
659
660 /* Associate a SS chain with a loop. */
661
662 void
663 gfc_add_ss_to_loop (gfc_loopinfo * loop, gfc_ss * head)
664 {
665 gfc_ss *ss;
666 gfc_loopinfo *nested_loop;
667
668 if (head == gfc_ss_terminator)
669 return;
670
671 set_ss_loop (head, loop);
672
673 ss = head;
674 for (; ss && ss != gfc_ss_terminator; ss = ss->next)
675 {
676 if (ss->nested_ss)
677 {
678 nested_loop = ss->nested_ss->loop;
679
680 /* More than one ss can belong to the same loop. Hence, we add the
681 loop to the chain only if it is different from the previously
682 added one, to avoid duplicate nested loops. */
683 if (nested_loop != loop->nested)
684 {
685 gcc_assert (nested_loop->parent == NULL);
686 nested_loop->parent = loop;
687
688 gcc_assert (nested_loop->next == NULL);
689 nested_loop->next = loop->nested;
690 loop->nested = nested_loop;
691 }
692 else
693 gcc_assert (nested_loop->parent == loop);
694 }
695
696 if (ss->next == gfc_ss_terminator)
697 ss->loop_chain = loop->ss;
698 else
699 ss->loop_chain = ss->next;
700 }
701 gcc_assert (ss == gfc_ss_terminator);
702 loop->ss = head;
703 }
704
705
706 /* Generate an initializer for a static pointer or allocatable array. */
707
708 void
709 gfc_trans_static_array_pointer (gfc_symbol * sym)
710 {
711 tree type;
712
713 gcc_assert (TREE_STATIC (sym->backend_decl));
714 /* Just zero the data member. */
715 type = TREE_TYPE (sym->backend_decl);
716 DECL_INITIAL (sym->backend_decl) = gfc_build_null_descriptor (type);
717 }
718
719
720 /* If the bounds of SE's loop have not yet been set, see if they can be
721 determined from array spec AS, which is the array spec of a called
722 function. MAPPING maps the callee's dummy arguments to the values
723 that the caller is passing. Add any initialization and finalization
724 code to SE. */
725
726 void
727 gfc_set_loop_bounds_from_array_spec (gfc_interface_mapping * mapping,
728 gfc_se * se, gfc_array_spec * as)
729 {
730 int n, dim, total_dim;
731 gfc_se tmpse;
732 gfc_ss *ss;
733 tree lower;
734 tree upper;
735 tree tmp;
736
737 total_dim = 0;
738
739 if (!as || as->type != AS_EXPLICIT)
740 return;
741
742 for (ss = se->ss; ss; ss = ss->parent)
743 {
744 total_dim += ss->loop->dimen;
745 for (n = 0; n < ss->loop->dimen; n++)
746 {
747 /* The bound is known, nothing to do. */
748 if (ss->loop->to[n] != NULL_TREE)
749 continue;
750
751 dim = ss->dim[n];
752 gcc_assert (dim < as->rank);
753 gcc_assert (ss->loop->dimen <= as->rank);
754
755 /* Evaluate the lower bound. */
756 gfc_init_se (&tmpse, NULL);
757 gfc_apply_interface_mapping (mapping, &tmpse, as->lower[dim]);
758 gfc_add_block_to_block (&se->pre, &tmpse.pre);
759 gfc_add_block_to_block (&se->post, &tmpse.post);
760 lower = fold_convert (gfc_array_index_type, tmpse.expr);
761
762 /* ...and the upper bound. */
763 gfc_init_se (&tmpse, NULL);
764 gfc_apply_interface_mapping (mapping, &tmpse, as->upper[dim]);
765 gfc_add_block_to_block (&se->pre, &tmpse.pre);
766 gfc_add_block_to_block (&se->post, &tmpse.post);
767 upper = fold_convert (gfc_array_index_type, tmpse.expr);
768
769 /* Set the upper bound of the loop to UPPER - LOWER. */
770 tmp = fold_build2_loc (input_location, MINUS_EXPR,
771 gfc_array_index_type, upper, lower);
772 tmp = gfc_evaluate_now (tmp, &se->pre);
773 ss->loop->to[n] = tmp;
774 }
775 }
776
777 gcc_assert (total_dim == as->rank);
778 }
779
780
781 /* Generate code to allocate an array temporary, or create a variable to
782 hold the data. If size is NULL, zero the descriptor so that the
783 callee will allocate the array. If DEALLOC is true, also generate code to
784 free the array afterwards.
785
786 If INITIAL is not NULL, it is packed using internal_pack and the result used
 787 as data instead of allocating a fresh, uninitialized area of memory.
788
789 Initialization code is added to PRE and finalization code to POST.
790 DYNAMIC is true if the caller may want to extend the array later
791 using realloc. This prevents us from putting the array on the stack. */
792
793 static void
794 gfc_trans_allocate_array_storage (stmtblock_t * pre, stmtblock_t * post,
795 gfc_array_info * info, tree size, tree nelem,
796 tree initial, bool dynamic, bool dealloc)
797 {
798 tree tmp;
799 tree desc;
800 bool onstack;
801
802 desc = info->descriptor;
803 info->offset = gfc_index_zero_node;
804 if (size == NULL_TREE || integer_zerop (size))
805 {
806 /* A callee allocated array. */
807 gfc_conv_descriptor_data_set (pre, desc, null_pointer_node);
808 onstack = FALSE;
809 }
810 else
811 {
812 /* Allocate the temporary. */
813 onstack = !dynamic && initial == NULL_TREE
814 && (gfc_option.flag_stack_arrays
815 || gfc_can_put_var_on_stack (size));
816
817 if (onstack)
818 {
819 /* Make a temporary variable to hold the data. */
820 tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (nelem),
821 nelem, gfc_index_one_node);
822 tmp = gfc_evaluate_now (tmp, pre);
823 tmp = build_range_type (gfc_array_index_type, gfc_index_zero_node,
824 tmp);
825 tmp = build_array_type (gfc_get_element_type (TREE_TYPE (desc)),
826 tmp);
827 tmp = gfc_create_var (tmp, "A");
828 /* If we're here only because of -fstack-arrays we have to
829 emit a DECL_EXPR to make the gimplifier emit alloca calls. */
830 if (!gfc_can_put_var_on_stack (size))
831 gfc_add_expr_to_block (pre,
832 fold_build1_loc (input_location,
833 DECL_EXPR, TREE_TYPE (tmp),
834 tmp));
835 tmp = gfc_build_addr_expr (NULL_TREE, tmp);
836 gfc_conv_descriptor_data_set (pre, desc, tmp);
837 }
838 else
839 {
840 /* Allocate memory to hold the data or call internal_pack. */
841 if (initial == NULL_TREE)
842 {
843 tmp = gfc_call_malloc (pre, NULL, size);
844 tmp = gfc_evaluate_now (tmp, pre);
845 }
846 else
847 {
848 tree packed;
849 tree source_data;
850 tree was_packed;
851 stmtblock_t do_copying;
852
853 tmp = TREE_TYPE (initial); /* Pointer to descriptor. */
854 gcc_assert (TREE_CODE (tmp) == POINTER_TYPE);
855 tmp = TREE_TYPE (tmp); /* The descriptor itself. */
856 tmp = gfc_get_element_type (tmp);
857 gcc_assert (tmp == gfc_get_element_type (TREE_TYPE (desc)));
858 packed = gfc_create_var (build_pointer_type (tmp), "data");
859
860 tmp = build_call_expr_loc (input_location,
861 gfor_fndecl_in_pack, 1, initial);
862 tmp = fold_convert (TREE_TYPE (packed), tmp);
863 gfc_add_modify (pre, packed, tmp);
864
865 tmp = build_fold_indirect_ref_loc (input_location,
866 initial);
867 source_data = gfc_conv_descriptor_data_get (tmp);
868
869 /* internal_pack may return source->data without any allocation
870 or copying if it is already packed. If that's the case, we
871 need to allocate and copy manually. */
872
873 gfc_start_block (&do_copying);
874 tmp = gfc_call_malloc (&do_copying, NULL, size);
875 tmp = fold_convert (TREE_TYPE (packed), tmp);
876 gfc_add_modify (&do_copying, packed, tmp);
877 tmp = gfc_build_memcpy_call (packed, source_data, size);
878 gfc_add_expr_to_block (&do_copying, tmp);
879
880 was_packed = fold_build2_loc (input_location, EQ_EXPR,
881 boolean_type_node, packed,
882 source_data);
883 tmp = gfc_finish_block (&do_copying);
884 tmp = build3_v (COND_EXPR, was_packed, tmp,
885 build_empty_stmt (input_location));
886 gfc_add_expr_to_block (pre, tmp);
887
888 tmp = fold_convert (pvoid_type_node, packed);
889 }
890
891 gfc_conv_descriptor_data_set (pre, desc, tmp);
892 }
893 }
894 info->data = gfc_conv_descriptor_data_get (desc);
895
896 /* The offset is zero because we create temporaries with a zero
897 lower bound. */
898 gfc_conv_descriptor_offset_set (pre, desc, gfc_index_zero_node);
899
900 if (dealloc && !onstack)
901 {
902 /* Free the temporary. */
903 tmp = gfc_conv_descriptor_data_get (desc);
904 tmp = gfc_call_free (fold_convert (pvoid_type_node, tmp));
905 gfc_add_expr_to_block (post, tmp);
906 }
907 }
908
909
 910 /* Get the scalarizer array dimension corresponding to the actual array dimension
911 given by ARRAY_DIM.
912
913 For example, if SS represents the array ref a(1,:,:,1), it is a
914 bidimensional scalarizer array, and the result would be 0 for ARRAY_DIM=1,
915 and 1 for ARRAY_DIM=2.
916 If SS represents transpose(a(:,1,1,:)), it is again a bidimensional
917 scalarizer array, and the result would be 1 for ARRAY_DIM=0 and 0 for
918 ARRAY_DIM=3.
919 If SS represents sum(a(:,:,:,1), dim=1), it is a 2+1-dimensional scalarizer
920 array. If called on the inner ss, the result would be respectively 0,1,2 for
921 ARRAY_DIM=0,1,2. If called on the outer ss, the result would be 0,1
922 for ARRAY_DIM=1,2. */
923
924 static int
925 get_scalarizer_dim_for_array_dim (gfc_ss *ss, int array_dim)
926 {
927 int array_ref_dim;
928 int n;
929
930 array_ref_dim = 0;
931
932 for (; ss; ss = ss->parent)
933 for (n = 0; n < ss->dimen; n++)
934 if (ss->dim[n] < array_dim)
935 array_ref_dim++;
936
937 return array_ref_dim;
938 }
939
940
941 static gfc_ss *
942 innermost_ss (gfc_ss *ss)
943 {
944 while (ss->nested_ss != NULL)
945 ss = ss->nested_ss;
946
947 return ss;
948 }
949
950
951
952 /* Get the array reference dimension corresponding to the given loop dimension.
953 It is different from the true array dimension given by the dim array in
 954 the case of a partial array reference (e.g. a(:,:,1,:)), and it is
 955 different from the loop dimension in the case of a transposed array. */
957
958 static int
959 get_array_ref_dim_for_loop_dim (gfc_ss *ss, int loop_dim)
960 {
961 return get_scalarizer_dim_for_array_dim (innermost_ss (ss),
962 ss->dim[loop_dim]);
963 }
964
965
966 /* Generate code to create and initialize the descriptor for a temporary
967 array. This is used for both temporaries needed by the scalarizer, and
968 functions returning arrays. Adjusts the loop variables to be
969 zero-based, and calculates the loop bounds for callee allocated arrays.
970 Allocate the array unless it's callee allocated (we have a callee
971 allocated array if 'callee_alloc' is true, or if loop->to[n] is
972 NULL_TREE for any n). Also fills in the descriptor, data and offset
973 fields of info if known. Returns the size of the array, or NULL for a
974 callee allocated array.
975
976 PRE, POST, INITIAL, DYNAMIC and DEALLOC are as for
977 gfc_trans_allocate_array_storage. */
978
979 tree
980 gfc_trans_create_temp_array (stmtblock_t * pre, stmtblock_t * post, gfc_ss * ss,
981 tree eltype, tree initial, bool dynamic,
982 bool dealloc, bool callee_alloc, locus * where)
983 {
984 gfc_loopinfo *loop;
985 gfc_ss *s;
986 gfc_array_info *info;
987 tree from[GFC_MAX_DIMENSIONS], to[GFC_MAX_DIMENSIONS];
988 tree type;
989 tree desc;
990 tree tmp;
991 tree size;
992 tree nelem;
993 tree cond;
994 tree or_expr;
995 int n, dim, tmp_dim;
996 int total_dim = 0;
997
998 memset (from, 0, sizeof (from));
999 memset (to, 0, sizeof (to));
1000
1001 info = &ss->info->data.array;
1002
1003 gcc_assert (ss->dimen > 0);
1004 gcc_assert (ss->loop->dimen == ss->dimen);
1005
1006 if (gfc_option.warn_array_temp && where)
1007 gfc_warning ("Creating array temporary at %L", where);
1008
1009 /* Set the lower bound to zero. */
1010 for (s = ss; s; s = s->parent)
1011 {
1012 loop = s->loop;
1013
1014 total_dim += loop->dimen;
1015 for (n = 0; n < loop->dimen; n++)
1016 {
1017 dim = s->dim[n];
1018
1019 /* Callee allocated arrays may not have a known bound yet. */
1020 if (loop->to[n])
1021 loop->to[n] = gfc_evaluate_now (
1022 fold_build2_loc (input_location, MINUS_EXPR,
1023 gfc_array_index_type,
1024 loop->to[n], loop->from[n]),
1025 pre);
1026 loop->from[n] = gfc_index_zero_node;
1027
 1028 /* We have just changed the loop bounds, so we must clear the
 1029 corresponding specloop so that the delta calculation is not skipped
 1030 later in set_delta. */
1031 loop->specloop[n] = NULL;
1032
1033 /* We are constructing the temporary's descriptor based on the loop
1034 dimensions. As the dimensions may be accessed in arbitrary order
 1035 (think of transpose), the size taken from the n'th loop may not map
 1036 to the n'th dimension of the array. We need to reconstruct the loop
 1037 info in the right order before using it to set the descriptor
 1038 bounds. */
1039 tmp_dim = get_scalarizer_dim_for_array_dim (ss, dim);
1040 from[tmp_dim] = loop->from[n];
1041 to[tmp_dim] = loop->to[n];
1042
1043 info->delta[dim] = gfc_index_zero_node;
1044 info->start[dim] = gfc_index_zero_node;
1045 info->end[dim] = gfc_index_zero_node;
1046 info->stride[dim] = gfc_index_one_node;
1047 }
1048 }
1049
1050 /* Initialize the descriptor. */
1051 type =
1052 gfc_get_array_type_bounds (eltype, total_dim, 0, from, to, 1,
1053 GFC_ARRAY_UNKNOWN, true);
1054 desc = gfc_create_var (type, "atmp");
1055 GFC_DECL_PACKED_ARRAY (desc) = 1;
1056
1057 info->descriptor = desc;
1058 size = gfc_index_one_node;
1059
1060 /* Fill in the array dtype. */
1061 tmp = gfc_conv_descriptor_dtype (desc);
1062 gfc_add_modify (pre, tmp, gfc_get_dtype (TREE_TYPE (desc)));
1063
1064 /*
1065 Fill in the bounds and stride. This is a packed array, so:
1066
1067 size = 1;
1068 for (n = 0; n < rank; n++)
1069 {
1070 stride[n] = size
1071 delta = ubound[n] + 1 - lbound[n];
1072 size = size * delta;
1073 }
1074 size = size * sizeof(element);
1075 */
1076
1077 or_expr = NULL_TREE;
1078
1079 /* If there is at least one null loop->to[n], it is a callee allocated
1080 array. */
1081 for (n = 0; n < total_dim; n++)
1082 if (to[n] == NULL_TREE)
1083 {
1084 size = NULL_TREE;
1085 break;
1086 }
1087
1088 if (size == NULL_TREE)
1089 for (s = ss; s; s = s->parent)
1090 for (n = 0; n < s->loop->dimen; n++)
1091 {
1092 dim = get_scalarizer_dim_for_array_dim (ss, ss->dim[n]);
1093
1094 /* For a callee allocated array express the loop bounds in terms
1095 of the descriptor fields. */
1096 tmp = fold_build2_loc (input_location,
1097 MINUS_EXPR, gfc_array_index_type,
1098 gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]),
1099 gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]));
1100 s->loop->to[n] = tmp;
1101 }
1102 else
1103 {
1104 for (n = 0; n < total_dim; n++)
1105 {
1106 /* Store the stride and bound components in the descriptor. */
1107 gfc_conv_descriptor_stride_set (pre, desc, gfc_rank_cst[n], size);
1108
1109 gfc_conv_descriptor_lbound_set (pre, desc, gfc_rank_cst[n],
1110 gfc_index_zero_node);
1111
1112 gfc_conv_descriptor_ubound_set (pre, desc, gfc_rank_cst[n], to[n]);
1113
1114 tmp = fold_build2_loc (input_location, PLUS_EXPR,
1115 gfc_array_index_type,
1116 to[n], gfc_index_one_node);
1117
1118 /* Check whether the size for this dimension is negative. */
1119 cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
1120 tmp, gfc_index_zero_node);
1121 cond = gfc_evaluate_now (cond, pre);
1122
1123 if (n == 0)
1124 or_expr = cond;
1125 else
1126 or_expr = fold_build2_loc (input_location, TRUTH_OR_EXPR,
1127 boolean_type_node, or_expr, cond);
1128
1129 size = fold_build2_loc (input_location, MULT_EXPR,
1130 gfc_array_index_type, size, tmp);
1131 size = gfc_evaluate_now (size, pre);
1132 }
1133 }
1134
1135 /* Get the size of the array. */
1136 if (size && !callee_alloc)
1137 {
1138 /* If or_expr is true, then the extent in at least one
1139 dimension is zero and the size is set to zero. */
1140 size = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type,
1141 or_expr, gfc_index_zero_node, size);
1142
1143 nelem = size;
1144 size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
1145 size,
1146 fold_convert (gfc_array_index_type,
1147 TYPE_SIZE_UNIT (gfc_get_element_type (type))));
1148 }
1149 else
1150 {
1151 nelem = size;
1152 size = NULL_TREE;
1153 }
1154
1155 gfc_trans_allocate_array_storage (pre, post, info, size, nelem, initial,
1156 dynamic, dealloc);
1157
1158 while (ss->parent)
1159 ss = ss->parent;
1160
1161 if (ss->dimen > ss->loop->temp_dim)
1162 ss->loop->temp_dim = ss->dimen;
1163
1164 return size;
1165 }
1166
1167
1168 /* Return the number of iterations in a loop that starts at START,
1169 ends at END, and has step STEP. */
1170
1171 static tree
1172 gfc_get_iteration_count (tree start, tree end, tree step)
1173 {
1174 tree tmp;
1175 tree type;
1176
1177 type = TREE_TYPE (step);
1178 tmp = fold_build2_loc (input_location, MINUS_EXPR, type, end, start);
1179 tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR, type, tmp, step);
1180 tmp = fold_build2_loc (input_location, PLUS_EXPR, type, tmp,
1181 build_int_cst (type, 1));
1182 tmp = fold_build2_loc (input_location, MAX_EXPR, type, tmp,
1183 build_int_cst (type, 0));
1184 return fold_convert (gfc_array_index_type, tmp);
1185 }
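
/* For example, with START = 1, END = 10 and STEP = 3 the expression above
   evaluates to MAX (FLOOR ((10 - 1) / 3) + 1, 0) = 4, i.e. the iterations
   1, 4, 7 and 10.  An empty range such as START = 5, END = 1, STEP = 1
   gives MAX (-4 + 1, 0) = 0.  */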
1186
1187
1188 /* Extend the data in array DESC by EXTRA elements. */
1189
1190 static void
1191 gfc_grow_array (stmtblock_t * pblock, tree desc, tree extra)
1192 {
1193 tree arg0, arg1;
1194 tree tmp;
1195 tree size;
1196 tree ubound;
1197
1198 if (integer_zerop (extra))
1199 return;
1200
1201 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[0]);
1202
1203 /* Add EXTRA to the upper bound. */
1204 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
1205 ubound, extra);
1206 gfc_conv_descriptor_ubound_set (pblock, desc, gfc_rank_cst[0], tmp);
1207
1208 /* Get the value of the current data pointer. */
1209 arg0 = gfc_conv_descriptor_data_get (desc);
1210
1211 /* Calculate the new array size. */
1212 size = TYPE_SIZE_UNIT (gfc_get_element_type (TREE_TYPE (desc)));
1213 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
1214 ubound, gfc_index_one_node);
1215 arg1 = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
1216 fold_convert (size_type_node, tmp),
1217 fold_convert (size_type_node, size));
1218
1219 /* Call the realloc() function. */
1220 tmp = gfc_call_realloc (pblock, arg0, arg1);
1221 gfc_conv_descriptor_data_set (pblock, desc, tmp);
1222 }
1223
1224
1225 /* Return true if the bounds of iterator I can only be determined
1226 at run time. */
1227
1228 static inline bool
1229 gfc_iterator_has_dynamic_bounds (gfc_iterator * i)
1230 {
1231 return (i->start->expr_type != EXPR_CONSTANT
1232 || i->end->expr_type != EXPR_CONSTANT
1233 || i->step->expr_type != EXPR_CONSTANT);
1234 }
1235
1236
1237 /* Split the size of constructor element EXPR into the sum of two terms,
1238 one of which can be determined at compile time and one of which must
1239 be calculated at run time. Set *SIZE to the former and return true
1240 if the latter might be nonzero. */
1241
1242 static bool
1243 gfc_get_array_constructor_element_size (mpz_t * size, gfc_expr * expr)
1244 {
1245 if (expr->expr_type == EXPR_ARRAY)
1246 return gfc_get_array_constructor_size (size, expr->value.constructor);
1247 else if (expr->rank > 0)
1248 {
1249 /* Calculate everything at run time. */
1250 mpz_set_ui (*size, 0);
1251 return true;
1252 }
1253 else
1254 {
1255 /* A single element. */
1256 mpz_set_ui (*size, 1);
1257 return false;
1258 }
1259 }
1260
1261
1262 /* Like gfc_get_array_constructor_element_size, but applied to the whole
1263 of array constructor C. */
1264
1265 static bool
1266 gfc_get_array_constructor_size (mpz_t * size, gfc_constructor_base base)
1267 {
1268 gfc_constructor *c;
1269 gfc_iterator *i;
1270 mpz_t val;
1271 mpz_t len;
1272 bool dynamic;
1273
1274 mpz_set_ui (*size, 0);
1275 mpz_init (len);
1276 mpz_init (val);
1277
1278 dynamic = false;
1279 for (c = gfc_constructor_first (base); c; c = gfc_constructor_next (c))
1280 {
1281 i = c->iterator;
1282 if (i && gfc_iterator_has_dynamic_bounds (i))
1283 dynamic = true;
1284 else
1285 {
1286 dynamic |= gfc_get_array_constructor_element_size (&len, c->expr);
1287 if (i)
1288 {
1289 /* Multiply the static part of the element size by the
1290 number of iterations. */
1291 mpz_sub (val, i->end->value.integer, i->start->value.integer);
1292 mpz_fdiv_q (val, val, i->step->value.integer);
1293 mpz_add_ui (val, val, 1);
1294 if (mpz_sgn (val) > 0)
1295 mpz_mul (len, len, val);
1296 else
1297 mpz_set_ui (len, 0);
1298 }
1299 mpz_add (*size, *size, len);
1300 }
1301 }
1302 mpz_clear (len);
1303 mpz_clear (val);
1304 return dynamic;
1305 }
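
/* For example, given a constructor such as (/ 1, 2, (0, i = 1, n) /) with a
   non-constant N, the two leading constants contribute 2 to *SIZE, while the
   implied-do has dynamic bounds, so the function returns true and the space
   for the remaining elements has to be obtained at run time (via
   gfc_grow_array above).  */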
1306
1307
1308 /* Make sure offset is a variable. */
1309
1310 static void
1311 gfc_put_offset_into_var (stmtblock_t * pblock, tree * poffset,
1312 tree * offsetvar)
1313 {
1314 /* We should have already created the offset variable. We cannot
1315 create it here because we may be in an inner scope. */
1316 gcc_assert (*offsetvar != NULL_TREE);
1317 gfc_add_modify (pblock, *offsetvar, *poffset);
1318 *poffset = *offsetvar;
1319 TREE_USED (*offsetvar) = 1;
1320 }
1321
1322
1323 /* Variables needed for bounds-checking. */
1324 static bool first_len;
1325 static tree first_len_val;
1326 static bool typespec_chararray_ctor;
1327
1328 static void
1329 gfc_trans_array_ctor_element (stmtblock_t * pblock, tree desc,
1330 tree offset, gfc_se * se, gfc_expr * expr)
1331 {
1332 tree tmp;
1333
1334 gfc_conv_expr (se, expr);
1335
1336 /* Store the value. */
1337 tmp = build_fold_indirect_ref_loc (input_location,
1338 gfc_conv_descriptor_data_get (desc));
1339 tmp = gfc_build_array_ref (tmp, offset, NULL);
1340
1341 if (expr->ts.type == BT_CHARACTER)
1342 {
1343 int i = gfc_validate_kind (BT_CHARACTER, expr->ts.kind, false);
1344 tree esize;
1345
1346 esize = size_in_bytes (gfc_get_element_type (TREE_TYPE (desc)));
1347 esize = fold_convert (gfc_charlen_type_node, esize);
1348 esize = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
1349 gfc_charlen_type_node, esize,
1350 build_int_cst (gfc_charlen_type_node,
1351 gfc_character_kinds[i].bit_size / 8));
1352
1353 gfc_conv_string_parameter (se);
1354 if (POINTER_TYPE_P (TREE_TYPE (tmp)))
1355 {
1356 /* The temporary is an array of pointers. */
1357 se->expr = fold_convert (TREE_TYPE (tmp), se->expr);
1358 gfc_add_modify (&se->pre, tmp, se->expr);
1359 }
1360 else
1361 {
1362 /* The temporary is an array of string values. */
1363 tmp = gfc_build_addr_expr (gfc_get_pchar_type (expr->ts.kind), tmp);
1364 /* We know the temporary and the value will be the same length,
 1365 so we can use memcpy. */
1366 gfc_trans_string_copy (&se->pre, esize, tmp, expr->ts.kind,
1367 se->string_length, se->expr, expr->ts.kind);
1368 }
1369 if ((gfc_option.rtcheck & GFC_RTCHECK_BOUNDS) && !typespec_chararray_ctor)
1370 {
1371 if (first_len)
1372 {
1373 gfc_add_modify (&se->pre, first_len_val,
1374 se->string_length);
1375 first_len = false;
1376 }
1377 else
1378 {
1379 /* Verify that all constructor elements are of the same
1380 length. */
1381 tree cond = fold_build2_loc (input_location, NE_EXPR,
1382 boolean_type_node, first_len_val,
1383 se->string_length);
1384 gfc_trans_runtime_check
1385 (true, false, cond, &se->pre, &expr->where,
1386 "Different CHARACTER lengths (%ld/%ld) in array constructor",
1387 fold_convert (long_integer_type_node, first_len_val),
1388 fold_convert (long_integer_type_node, se->string_length));
1389 }
1390 }
1391 }
1392 else
1393 {
1394 /* TODO: Should the frontend already have done this conversion? */
1395 se->expr = fold_convert (TREE_TYPE (tmp), se->expr);
1396 gfc_add_modify (&se->pre, tmp, se->expr);
1397 }
1398
1399 gfc_add_block_to_block (pblock, &se->pre);
1400 gfc_add_block_to_block (pblock, &se->post);
1401 }
1402
1403
1404 /* Add the contents of an array to the constructor. DYNAMIC is as for
1405 gfc_trans_array_constructor_value. */
1406
1407 static void
1408 gfc_trans_array_constructor_subarray (stmtblock_t * pblock,
1409 tree type ATTRIBUTE_UNUSED,
1410 tree desc, gfc_expr * expr,
1411 tree * poffset, tree * offsetvar,
1412 bool dynamic)
1413 {
1414 gfc_se se;
1415 gfc_ss *ss;
1416 gfc_loopinfo loop;
1417 stmtblock_t body;
1418 tree tmp;
1419 tree size;
1420 int n;
1421
1422 /* We need this to be a variable so we can increment it. */
1423 gfc_put_offset_into_var (pblock, poffset, offsetvar);
1424
1425 gfc_init_se (&se, NULL);
1426
1427 /* Walk the array expression. */
1428 ss = gfc_walk_expr (expr);
1429 gcc_assert (ss != gfc_ss_terminator);
1430
1431 /* Initialize the scalarizer. */
1432 gfc_init_loopinfo (&loop);
1433 gfc_add_ss_to_loop (&loop, ss);
1434
1435 /* Initialize the loop. */
1436 gfc_conv_ss_startstride (&loop);
1437 gfc_conv_loop_setup (&loop, &expr->where);
1438
1439 /* Make sure the constructed array has room for the new data. */
1440 if (dynamic)
1441 {
1442 /* Set SIZE to the total number of elements in the subarray. */
1443 size = gfc_index_one_node;
1444 for (n = 0; n < loop.dimen; n++)
1445 {
1446 tmp = gfc_get_iteration_count (loop.from[n], loop.to[n],
1447 gfc_index_one_node);
1448 size = fold_build2_loc (input_location, MULT_EXPR,
1449 gfc_array_index_type, size, tmp);
1450 }
1451
1452 /* Grow the constructed array by SIZE elements. */
1453 gfc_grow_array (&loop.pre, desc, size);
1454 }
1455
1456 /* Make the loop body. */
1457 gfc_mark_ss_chain_used (ss, 1);
1458 gfc_start_scalarized_body (&loop, &body);
1459 gfc_copy_loopinfo_to_se (&se, &loop);
1460 se.ss = ss;
1461
1462 gfc_trans_array_ctor_element (&body, desc, *poffset, &se, expr);
1463 gcc_assert (se.ss == gfc_ss_terminator);
1464
1465 /* Increment the offset. */
1466 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
1467 *poffset, gfc_index_one_node);
1468 gfc_add_modify (&body, *poffset, tmp);
1469
1470 /* Finish the loop. */
1471 gfc_trans_scalarizing_loops (&loop, &body);
1472 gfc_add_block_to_block (&loop.pre, &loop.post);
1473 tmp = gfc_finish_block (&loop.pre);
1474 gfc_add_expr_to_block (pblock, tmp);
1475
1476 gfc_cleanup_loop (&loop);
1477 }
1478
1479
1480 /* Assign the values to the elements of an array constructor. DYNAMIC
1481 is true if descriptor DESC only contains enough data for the static
1482 size calculated by gfc_get_array_constructor_size. When true, memory
1483 for the dynamic parts must be allocated using realloc. */
1484
1485 static void
1486 gfc_trans_array_constructor_value (stmtblock_t * pblock, tree type,
1487 tree desc, gfc_constructor_base base,
1488 tree * poffset, tree * offsetvar,
1489 bool dynamic)
1490 {
1491 tree tmp;
1492 stmtblock_t body;
1493 gfc_se se;
1494 mpz_t size;
1495 gfc_constructor *c;
1496
1497 tree shadow_loopvar = NULL_TREE;
1498 gfc_saved_var saved_loopvar;
1499
1500 mpz_init (size);
1501 for (c = gfc_constructor_first (base); c; c = gfc_constructor_next (c))
1502 {
1503 /* If this is an iterator or an array, the offset must be a variable. */
1504 if ((c->iterator || c->expr->rank > 0) && INTEGER_CST_P (*poffset))
1505 gfc_put_offset_into_var (pblock, poffset, offsetvar);
1506
1507 /* Shadowing the iterator avoids changing its value and saves us from
1508 keeping track of it. Further, it makes sure that there's always a
1509 backend-decl for the symbol, even if there wasn't one before,
1510 e.g. in the case of an iterator that appears in a specification
1511 expression in an interface mapping. */
1512 if (c->iterator)
1513 {
1514 gfc_symbol *sym = c->iterator->var->symtree->n.sym;
1515 tree type = gfc_typenode_for_spec (&sym->ts);
1516
1517 shadow_loopvar = gfc_create_var (type, "shadow_loopvar");
1518 gfc_shadow_sym (sym, shadow_loopvar, &saved_loopvar);
1519 }
1520
1521 gfc_start_block (&body);
1522
1523 if (c->expr->expr_type == EXPR_ARRAY)
1524 {
1525 /* Array constructors can be nested. */
1526 gfc_trans_array_constructor_value (&body, type, desc,
1527 c->expr->value.constructor,
1528 poffset, offsetvar, dynamic);
1529 }
1530 else if (c->expr->rank > 0)
1531 {
1532 gfc_trans_array_constructor_subarray (&body, type, desc, c->expr,
1533 poffset, offsetvar, dynamic);
1534 }
1535 else
1536 {
1537 /* This code really upsets the gimplifier so don't bother for now. */
1538 gfc_constructor *p;
1539 HOST_WIDE_INT n;
1540 HOST_WIDE_INT size;
1541
1542 p = c;
1543 n = 0;
1544 while (p && !(p->iterator || p->expr->expr_type != EXPR_CONSTANT))
1545 {
1546 p = gfc_constructor_next (p);
1547 n++;
1548 }
1549 if (n < 4)
1550 {
1551 /* Scalar values. */
1552 gfc_init_se (&se, NULL);
1553 gfc_trans_array_ctor_element (&body, desc, *poffset,
1554 &se, c->expr);
1555
1556 *poffset = fold_build2_loc (input_location, PLUS_EXPR,
1557 gfc_array_index_type,
1558 *poffset, gfc_index_one_node);
1559 }
1560 else
1561 {
1562 /* Collect multiple scalar constants into a constructor. */
1563 VEC(constructor_elt,gc) *v = NULL;
1564 tree init;
1565 tree bound;
1566 tree tmptype;
1567 HOST_WIDE_INT idx = 0;
1568
1569 p = c;
1570 /* Count the number of consecutive scalar constants. */
1571 while (p && !(p->iterator
1572 || p->expr->expr_type != EXPR_CONSTANT))
1573 {
1574 gfc_init_se (&se, NULL);
1575 gfc_conv_constant (&se, p->expr);
1576
1577 if (c->expr->ts.type != BT_CHARACTER)
1578 se.expr = fold_convert (type, se.expr);
1579 /* For constant character array constructors we build
1580 an array of pointers. */
1581 else if (POINTER_TYPE_P (type))
1582 se.expr = gfc_build_addr_expr
1583 (gfc_get_pchar_type (p->expr->ts.kind),
1584 se.expr);
1585
1586 CONSTRUCTOR_APPEND_ELT (v,
1587 build_int_cst (gfc_array_index_type,
1588 idx++),
1589 se.expr);
1590 c = p;
1591 p = gfc_constructor_next (p);
1592 }
1593
1594 bound = size_int (n - 1);
1595 /* Create an array type to hold them. */
1596 tmptype = build_range_type (gfc_array_index_type,
1597 gfc_index_zero_node, bound);
1598 tmptype = build_array_type (type, tmptype);
1599
1600 init = build_constructor (tmptype, v);
1601 TREE_CONSTANT (init) = 1;
1602 TREE_STATIC (init) = 1;
1603 /* Create a static variable to hold the data. */
1604 tmp = gfc_create_var (tmptype, "data");
1605 TREE_STATIC (tmp) = 1;
1606 TREE_CONSTANT (tmp) = 1;
1607 TREE_READONLY (tmp) = 1;
1608 DECL_INITIAL (tmp) = init;
1609 init = tmp;
1610
1611 /* Use BUILTIN_MEMCPY to assign the values. */
1612 tmp = gfc_conv_descriptor_data_get (desc);
1613 tmp = build_fold_indirect_ref_loc (input_location,
1614 tmp);
1615 tmp = gfc_build_array_ref (tmp, *poffset, NULL);
1616 tmp = gfc_build_addr_expr (NULL_TREE, tmp);
1617 init = gfc_build_addr_expr (NULL_TREE, init);
1618
1619 size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (type));
1620 bound = build_int_cst (size_type_node, n * size);
1621 tmp = build_call_expr_loc (input_location,
1622 builtin_decl_explicit (BUILT_IN_MEMCPY),
1623 3, tmp, init, bound);
1624 gfc_add_expr_to_block (&body, tmp);
1625
1626 *poffset = fold_build2_loc (input_location, PLUS_EXPR,
1627 gfc_array_index_type, *poffset,
1628 build_int_cst (gfc_array_index_type, n));
1629 }
1630 if (!INTEGER_CST_P (*poffset))
1631 {
1632 gfc_add_modify (&body, *offsetvar, *poffset);
1633 *poffset = *offsetvar;
1634 }
1635 }
1636
1637 /* The frontend should already have done any expansions
1638 at compile-time. */
1639 if (!c->iterator)
1640 {
1641 /* Pass the code as is. */
1642 tmp = gfc_finish_block (&body);
1643 gfc_add_expr_to_block (pblock, tmp);
1644 }
1645 else
1646 {
1647 /* Build the implied do-loop. */
1648 stmtblock_t implied_do_block;
1649 tree cond;
1650 tree end;
1651 tree step;
1652 tree exit_label;
1653 tree loopbody;
1654 tree tmp2;
1655
1656 loopbody = gfc_finish_block (&body);
1657
1658 /* Create a new block that holds the implied-do loop. A temporary
1659 loop-variable is used. */
 1660 gfc_start_block (&implied_do_block);
1661
1662 /* Initialize the loop. */
1663 gfc_init_se (&se, NULL);
1664 gfc_conv_expr_val (&se, c->iterator->start);
1665 gfc_add_block_to_block (&implied_do_block, &se.pre);
1666 gfc_add_modify (&implied_do_block, shadow_loopvar, se.expr);
1667
1668 gfc_init_se (&se, NULL);
1669 gfc_conv_expr_val (&se, c->iterator->end);
1670 gfc_add_block_to_block (&implied_do_block, &se.pre);
1671 end = gfc_evaluate_now (se.expr, &implied_do_block);
1672
1673 gfc_init_se (&se, NULL);
1674 gfc_conv_expr_val (&se, c->iterator->step);
1675 gfc_add_block_to_block (&implied_do_block, &se.pre);
1676 step = gfc_evaluate_now (se.expr, &implied_do_block);
1677
1678 /* If this array expands dynamically, and the number of iterations
1679 is not constant, we won't have allocated space for the static
1680 part of C->EXPR's size. Do that now. */
1681 if (dynamic && gfc_iterator_has_dynamic_bounds (c->iterator))
1682 {
1683 /* Get the number of iterations. */
1684 tmp = gfc_get_iteration_count (shadow_loopvar, end, step);
1685
1686 /* Get the static part of C->EXPR's size. */
1687 gfc_get_array_constructor_element_size (&size, c->expr);
1688 tmp2 = gfc_conv_mpz_to_tree (size, gfc_index_integer_kind);
1689
1690 /* Grow the array by TMP * TMP2 elements. */
1691 tmp = fold_build2_loc (input_location, MULT_EXPR,
1692 gfc_array_index_type, tmp, tmp2);
1693 gfc_grow_array (&implied_do_block, desc, tmp);
1694 }
1695
1696 /* Generate the loop body. */
1697 exit_label = gfc_build_label_decl (NULL_TREE);
1698 gfc_start_block (&body);
1699
1700 /* Generate the exit condition. Depending on the sign of
1701 the step variable we have to generate the correct
1702 comparison. */
1703 tmp = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
1704 step, build_int_cst (TREE_TYPE (step), 0));
1705 cond = fold_build3_loc (input_location, COND_EXPR,
1706 boolean_type_node, tmp,
1707 fold_build2_loc (input_location, GT_EXPR,
1708 boolean_type_node, shadow_loopvar, end),
1709 fold_build2_loc (input_location, LT_EXPR,
1710 boolean_type_node, shadow_loopvar, end));
1711 tmp = build1_v (GOTO_EXPR, exit_label);
1712 TREE_USED (exit_label) = 1;
1713 tmp = build3_v (COND_EXPR, cond, tmp,
1714 build_empty_stmt (input_location));
1715 gfc_add_expr_to_block (&body, tmp);
1716
1717 /* The main loop body. */
1718 gfc_add_expr_to_block (&body, loopbody);
1719
1720 /* Increase loop variable by step. */
1721 tmp = fold_build2_loc (input_location, PLUS_EXPR,
1722 TREE_TYPE (shadow_loopvar), shadow_loopvar,
1723 step);
1724 gfc_add_modify (&body, shadow_loopvar, tmp);
1725
1726 /* Finish the loop. */
1727 tmp = gfc_finish_block (&body);
1728 tmp = build1_v (LOOP_EXPR, tmp);
1729 gfc_add_expr_to_block (&implied_do_block, tmp);
1730
1731 /* Add the exit label. */
1732 tmp = build1_v (LABEL_EXPR, exit_label);
1733 gfc_add_expr_to_block (&implied_do_block, tmp);
1734
 1735 /* Finish the implied-do loop. */
 1736 tmp = gfc_finish_block (&implied_do_block);
 1737 gfc_add_expr_to_block (pblock, tmp);
1738
1739 gfc_restore_sym (c->iterator->var->symtree->n.sym, &saved_loopvar);
1740 }
1741 }
1742 mpz_clear (size);
1743 }
1744
1745
 1746 /* A catch-all to obtain the string length for anything that is not
 1747 a substring of non-constant length, a constant, an array or a variable. */
1748
1749 static void
1750 get_array_ctor_all_strlen (stmtblock_t *block, gfc_expr *e, tree *len)
1751 {
1752 gfc_se se;
1753 gfc_ss *ss;
1754
1755 /* Don't bother if we already know the length is a constant. */
1756 if (*len && INTEGER_CST_P (*len))
1757 return;
1758
1759 if (!e->ref && e->ts.u.cl && e->ts.u.cl->length
1760 && e->ts.u.cl->length->expr_type == EXPR_CONSTANT)
1761 {
1762 /* This is easy. */
1763 gfc_conv_const_charlen (e->ts.u.cl);
1764 *len = e->ts.u.cl->backend_decl;
1765 }
1766 else
1767 {
1768 /* Otherwise, be brutal even if inefficient. */
1769 ss = gfc_walk_expr (e);
1770 gfc_init_se (&se, NULL);
1771
1772 /* No function call, in case of side effects. */
1773 se.no_function_call = 1;
1774 if (ss == gfc_ss_terminator)
1775 gfc_conv_expr (&se, e);
1776 else
1777 gfc_conv_expr_descriptor (&se, e, ss);
1778
1779 /* Fix the value. */
1780 *len = gfc_evaluate_now (se.string_length, &se.pre);
1781
1782 gfc_add_block_to_block (block, &se.pre);
1783 gfc_add_block_to_block (block, &se.post);
1784
1785 e->ts.u.cl->backend_decl = *len;
1786 }
1787 }
1788
1789
1790 /* Figure out the string length of a variable reference expression.
1791 Used by get_array_ctor_strlen. */
1792
1793 static void
1794 get_array_ctor_var_strlen (stmtblock_t *block, gfc_expr * expr, tree * len)
1795 {
1796 gfc_ref *ref;
1797 gfc_typespec *ts;
1798 mpz_t char_len;
1799
1800 /* Don't bother if we already know the length is a constant. */
1801 if (*len && INTEGER_CST_P (*len))
1802 return;
1803
1804 ts = &expr->symtree->n.sym->ts;
1805 for (ref = expr->ref; ref; ref = ref->next)
1806 {
1807 switch (ref->type)
1808 {
1809 case REF_ARRAY:
1810 /* Array references don't change the string length. */
1811 break;
1812
1813 case REF_COMPONENT:
1814 /* Use the length of the component. */
1815 ts = &ref->u.c.component->ts;
1816 break;
1817
1818 case REF_SUBSTRING:
1819 if (ref->u.ss.start->expr_type != EXPR_CONSTANT
1820 || ref->u.ss.end->expr_type != EXPR_CONSTANT)
1821 {
1822 /* Note that this might evaluate expr. */
1823 get_array_ctor_all_strlen (block, expr, len);
1824 return;
1825 }
1826 mpz_init_set_ui (char_len, 1);
1827 mpz_add (char_len, char_len, ref->u.ss.end->value.integer);
1828 mpz_sub (char_len, char_len, ref->u.ss.start->value.integer);
1829 *len = gfc_conv_mpz_to_tree (char_len, gfc_default_integer_kind);
1830 *len = convert (gfc_charlen_type_node, *len);
1831 mpz_clear (char_len);
1832 return;
1833
1834 default:
1835 gcc_unreachable ();
1836 }
1837 }
1838
1839 *len = ts->u.cl->backend_decl;
1840 }
1841
1842
1843 /* Figure out the string length of a character array constructor.
 1844 If LEN is NULL, don't calculate the length; this happens for recursive calls
 1845 when a sub-array-constructor is an element but not in the first position,
 1846 i.e. when we're not interested in the length.
1847 Returns TRUE if all elements are character constants. */
1848
1849 bool
1850 get_array_ctor_strlen (stmtblock_t *block, gfc_constructor_base base, tree * len)
1851 {
1852 gfc_constructor *c;
1853 bool is_const;
1854
1855 is_const = TRUE;
1856
1857 if (gfc_constructor_first (base) == NULL)
1858 {
1859 if (len)
1860 *len = build_int_cstu (gfc_charlen_type_node, 0);
1861 return is_const;
1862 }
1863
1864 /* Loop over all constructor elements to determine is_const, but in len we
1865 want to store the length of the first, not the last, element. We can
1866 of course exit the loop as soon as is_const is found to be false. */
1867 for (c = gfc_constructor_first (base);
1868 c && is_const; c = gfc_constructor_next (c))
1869 {
1870 switch (c->expr->expr_type)
1871 {
1872 case EXPR_CONSTANT:
1873 if (len && !(*len && INTEGER_CST_P (*len)))
1874 *len = build_int_cstu (gfc_charlen_type_node,
1875 c->expr->value.character.length);
1876 break;
1877
1878 case EXPR_ARRAY:
1879 if (!get_array_ctor_strlen (block, c->expr->value.constructor, len))
1880 is_const = false;
1881 break;
1882
1883 case EXPR_VARIABLE:
1884 is_const = false;
1885 if (len)
1886 get_array_ctor_var_strlen (block, c->expr, len);
1887 break;
1888
1889 default:
1890 is_const = false;
1891 if (len)
1892 get_array_ctor_all_strlen (block, c->expr, len);
1893 break;
1894 }
1895
1896 /* After the first iteration, we don't want the length modified. */
1897 len = NULL;
1898 }
1899
1900 return is_const;
1901 }
1902
1903 /* Check whether the array constructor BASE consists entirely of constant
1904 elements, and if so return the number of those elements; otherwise
1905 return zero. Note that an empty or NULL array constructor returns zero. */
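   /* For illustration, (/ 1, 2, 3 /) yields 3, whereas a constructor that
      contains an iterator, a non-scalar element or a non-constant element,
      e.g. (/ (i, i=1,3) /) or (/ 1, n /), yields zero.  */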
1906
1907 unsigned HOST_WIDE_INT
1908 gfc_constant_array_constructor_p (gfc_constructor_base base)
1909 {
1910 unsigned HOST_WIDE_INT nelem = 0;
1911
1912 gfc_constructor *c = gfc_constructor_first (base);
1913 while (c)
1914 {
1915 if (c->iterator
1916 || c->expr->rank > 0
1917 || c->expr->expr_type != EXPR_CONSTANT)
1918 return 0;
1919 c = gfc_constructor_next (c);
1920 nelem++;
1921 }
1922 return nelem;
1923 }
1924
1925
1926 /* Given EXPR, the constant array constructor specified by an EXPR_ARRAY,
1927 and the tree type of its elements, TYPE, return a static constant
1928 variable that is compile-time initialized. */
1929
1930 tree
1931 gfc_build_constant_array_constructor (gfc_expr * expr, tree type)
1932 {
1933 tree tmptype, init, tmp;
1934 HOST_WIDE_INT nelem;
1935 gfc_constructor *c;
1936 gfc_array_spec as;
1937 gfc_se se;
1938 int i;
1939 VEC(constructor_elt,gc) *v = NULL;
1940
1941 /* First traverse the constructor list, converting the constants
1942 to tree to build an initializer. */
1943 nelem = 0;
1944 c = gfc_constructor_first (expr->value.constructor);
1945 while (c)
1946 {
1947 gfc_init_se (&se, NULL);
1948 gfc_conv_constant (&se, c->expr);
1949 if (c->expr->ts.type != BT_CHARACTER)
1950 se.expr = fold_convert (type, se.expr);
1951 else if (POINTER_TYPE_P (type))
1952 se.expr = gfc_build_addr_expr (gfc_get_pchar_type (c->expr->ts.kind),
1953 se.expr);
1954 CONSTRUCTOR_APPEND_ELT (v, build_int_cst (gfc_array_index_type, nelem),
1955 se.expr);
1956 c = gfc_constructor_next (c);
1957 nelem++;
1958 }
1959
1960 /* Next determine the tree type for the array. We use the gfortran
1961 front-end's gfc_get_nodesc_array_type in order to create a suitable
1962 GFC_ARRAY_TYPE_P that may be used by the scalarizer. */
1963
1964 memset (&as, 0, sizeof (gfc_array_spec));
1965
1966 as.rank = expr->rank;
1967 as.type = AS_EXPLICIT;
1968 if (!expr->shape)
1969 {
1970 as.lower[0] = gfc_get_int_expr (gfc_default_integer_kind, NULL, 0);
1971 as.upper[0] = gfc_get_int_expr (gfc_default_integer_kind,
1972 NULL, nelem - 1);
1973 }
1974 else
1975 for (i = 0; i < expr->rank; i++)
1976 {
1977 int tmp = (int) mpz_get_si (expr->shape[i]);
1978 as.lower[i] = gfc_get_int_expr (gfc_default_integer_kind, NULL, 0);
1979 as.upper[i] = gfc_get_int_expr (gfc_default_integer_kind,
1980 NULL, tmp - 1);
1981 }
1982
1983 tmptype = gfc_get_nodesc_array_type (type, &as, PACKED_STATIC, true);
1984
1985 /* The array spec AS is not needed anymore; free its bound expressions. */
1986 for (i = 0; i < as.rank + as.corank; i++)
1987 {
1988 gfc_free_expr (as.lower[i]);
1989 gfc_free_expr (as.upper[i]);
1990 }
1991
1992 init = build_constructor (tmptype, v);
1993
1994 TREE_CONSTANT (init) = 1;
1995 TREE_STATIC (init) = 1;
1996
1997 tmp = gfc_create_var (tmptype, "A");
1998 TREE_STATIC (tmp) = 1;
1999 TREE_CONSTANT (tmp) = 1;
2000 TREE_READONLY (tmp) = 1;
2001 DECL_INITIAL (tmp) = init;
2002
2003 return tmp;
2004 }
2005
2006
2007 /* Translate a constant EXPR_ARRAY array constructor for the scalarizer.
2008 This mostly initializes the scalarizer state info structure with the
2009 appropriate values to directly use the array created by the function
2010 gfc_build_constant_array_constructor. */
2011
2012 static void
2013 trans_constant_array_constructor (gfc_ss * ss, tree type)
2014 {
2015 gfc_array_info *info;
2016 tree tmp;
2017 int i;
2018
2019 tmp = gfc_build_constant_array_constructor (ss->info->expr, type);
2020
2021 info = &ss->info->data.array;
2022
2023 info->descriptor = tmp;
2024 info->data = gfc_build_addr_expr (NULL_TREE, tmp);
2025 info->offset = gfc_index_zero_node;
2026
2027 for (i = 0; i < ss->dimen; i++)
2028 {
2029 info->delta[i] = gfc_index_zero_node;
2030 info->start[i] = gfc_index_zero_node;
2031 info->end[i] = gfc_index_zero_node;
2032 info->stride[i] = gfc_index_one_node;
2033 }
2034 }
2035
2036
2037 /* Helper routine of trans_array_constructor to determine if the
2038 bounds of the loop specified by LOOP are constant and simple enough
2039 to use with trans_constant_array_constructor. Returns the
2040 iteration count of the loop if suitable, and NULL_TREE otherwise. */
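   /* The returned size is the product over all loop dimensions of
      (to - from + 1); e.g. zero-based bounds of 0..3 and 0..1 give
      4 * 2 = 8 iterations.  */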
2041
2042 static tree
2043 constant_array_constructor_loop_size (gfc_loopinfo * loop)
2044 {
2045 tree size = gfc_index_one_node;
2046 tree tmp;
2047 int i;
2048
2049 for (i = 0; i < loop->dimen; i++)
2050 {
2051 /* If the bounds aren't constant, return NULL_TREE. */
2052 if (!INTEGER_CST_P (loop->from[i]) || !INTEGER_CST_P (loop->to[i]))
2053 return NULL_TREE;
2054 if (!integer_zerop (loop->from[i]))
2055 {
2056 /* Only allow nonzero "from" in one-dimensional arrays. */
2057 if (loop->dimen != 1)
2058 return NULL_TREE;
2059 tmp = fold_build2_loc (input_location, MINUS_EXPR,
2060 gfc_array_index_type,
2061 loop->to[i], loop->from[i]);
2062 }
2063 else
2064 tmp = loop->to[i];
2065 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
2066 tmp, gfc_index_one_node);
2067 size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
2068 size, tmp);
2069 }
2070
2071 return size;
2072 }
2073
2074
2075 /* Array constructors are handled by constructing a temporary, then using that
2076 within the scalarization loop. This is not optimal, but seems by far the
2077 simplest method. */
2078
2079 static void
2080 trans_array_constructor (gfc_ss * ss, locus * where)
2081 {
2082 gfc_constructor_base c;
2083 tree offset;
2084 tree offsetvar;
2085 tree desc;
2086 tree type;
2087 tree tmp;
2088 bool dynamic;
2089 bool old_first_len, old_typespec_chararray_ctor;
2090 tree old_first_len_val;
2091 gfc_loopinfo *loop;
2092 gfc_ss_info *ss_info;
2093 gfc_expr *expr;
2094 gfc_ss *s;
2095
2096 /* Save the old values for nested checking. */
2097 old_first_len = first_len;
2098 old_first_len_val = first_len_val;
2099 old_typespec_chararray_ctor = typespec_chararray_ctor;
2100
2101 loop = ss->loop;
2102 ss_info = ss->info;
2103 expr = ss_info->expr;
2104
2105 /* Do bounds-checking here and in gfc_trans_array_ctor_element only if no
2106 typespec was given for the array constructor. */
2107 typespec_chararray_ctor = (expr->ts.u.cl
2108 && expr->ts.u.cl->length_from_typespec);
2109
2110 if ((gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
2111 && expr->ts.type == BT_CHARACTER && !typespec_chararray_ctor)
2112 {
2113 first_len_val = gfc_create_var (gfc_charlen_type_node, "len");
2114 first_len = true;
2115 }
2116
2117 gcc_assert (ss->dimen == loop->dimen);
2118
2119 c = expr->value.constructor;
2120 if (expr->ts.type == BT_CHARACTER)
2121 {
2122 bool const_string;
2123
2124 /* get_array_ctor_strlen walks the elements of the constructor. If a
2125 typespec was given, we already know the string length and want the one
2126 specified there. */
2127 if (typespec_chararray_ctor && expr->ts.u.cl->length
2128 && expr->ts.u.cl->length->expr_type != EXPR_CONSTANT)
2129 {
2130 gfc_se length_se;
2131
2132 const_string = false;
2133 gfc_init_se (&length_se, NULL);
2134 gfc_conv_expr_type (&length_se, expr->ts.u.cl->length,
2135 gfc_charlen_type_node);
2136 ss_info->string_length = length_se.expr;
2137 gfc_add_block_to_block (&loop->pre, &length_se.pre);
2138 gfc_add_block_to_block (&loop->post, &length_se.post);
2139 }
2140 else
2141 const_string = get_array_ctor_strlen (&loop->pre, c,
2142 &ss_info->string_length);
2143
2144 /* Complex character array constructors should have been taken care of
2145 and not end up here. */
2146 gcc_assert (ss_info->string_length);
2147
2148 expr->ts.u.cl->backend_decl = ss_info->string_length;
2149
2150 type = gfc_get_character_type_len (expr->ts.kind, ss_info->string_length);
2151 if (const_string)
2152 type = build_pointer_type (type);
2153 }
2154 else
2155 type = gfc_typenode_for_spec (&expr->ts);
2156
2157 /* See if the constructor determines the loop bounds. */
2158 dynamic = false;
2159
2160 if (expr->shape && loop->dimen > 1 && loop->to[0] == NULL_TREE)
2161 {
2162 /* We have a multidimensional parameter. */
2163 for (s = ss; s; s = s->parent)
2164 {
2165 int n;
2166 for (n = 0; n < s->loop->dimen; n++)
2167 {
2168 s->loop->from[n] = gfc_index_zero_node;
2169 s->loop->to[n] = gfc_conv_mpz_to_tree (expr->shape[s->dim[n]],
2170 gfc_index_integer_kind);
2171 s->loop->to[n] = fold_build2_loc (input_location, MINUS_EXPR,
2172 gfc_array_index_type,
2173 s->loop->to[n],
2174 gfc_index_one_node);
2175 }
2176 }
2177 }
2178
2179 if (loop->to[0] == NULL_TREE)
2180 {
2181 mpz_t size;
2182
2183 /* We should have a 1-dimensional, zero-based loop. */
2184 gcc_assert (loop->parent == NULL && loop->nested == NULL);
2185 gcc_assert (loop->dimen == 1);
2186 gcc_assert (integer_zerop (loop->from[0]));
2187
2188 /* Split the constructor size into a static part and a dynamic part.
2189 Allocate the static size up-front and record whether the dynamic
2190 size might be nonzero. */
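      /* For example, a constructor along the lines of (/ 1, 2, (v(i), i=1,n) /)
	 with a run-time N has a static part (the two literals) and a dynamic
	 part whose number of elements is only known at run time.  */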
2191 mpz_init (size);
2192 dynamic = gfc_get_array_constructor_size (&size, c);
2193 mpz_sub_ui (size, size, 1);
2194 loop->to[0] = gfc_conv_mpz_to_tree (size, gfc_index_integer_kind);
2195 mpz_clear (size);
2196 }
2197
2198 /* Special case constant array constructors. */
2199 if (!dynamic)
2200 {
2201 unsigned HOST_WIDE_INT nelem = gfc_constant_array_constructor_p (c);
2202 if (nelem > 0)
2203 {
2204 tree size = constant_array_constructor_loop_size (loop);
2205 if (size && compare_tree_int (size, nelem) == 0)
2206 {
2207 trans_constant_array_constructor (ss, type);
2208 goto finish;
2209 }
2210 }
2211 }
2212
2213 if (TREE_CODE (loop->to[0]) == VAR_DECL)
2214 dynamic = true;
2215
2216 gfc_trans_create_temp_array (&loop->pre, &loop->post, ss, type, NULL_TREE,
2217 dynamic, true, false, where);
2218
2219 desc = ss_info->data.array.descriptor;
2220 offset = gfc_index_zero_node;
2221 offsetvar = gfc_create_var_np (gfc_array_index_type, "offset");
2222 TREE_NO_WARNING (offsetvar) = 1;
2223 TREE_USED (offsetvar) = 0;
2224 gfc_trans_array_constructor_value (&loop->pre, type, desc, c,
2225 &offset, &offsetvar, dynamic);
2226
2227 /* If the array grows dynamically, the upper bound of the loop variable
2228 is determined by the array's final upper bound. */
2229 if (dynamic)
2230 {
2231 tmp = fold_build2_loc (input_location, MINUS_EXPR,
2232 gfc_array_index_type,
2233 offsetvar, gfc_index_one_node);
2234 tmp = gfc_evaluate_now (tmp, &loop->pre);
2235 gfc_conv_descriptor_ubound_set (&loop->pre, desc, gfc_rank_cst[0], tmp);
2236 if (loop->to[0] && TREE_CODE (loop->to[0]) == VAR_DECL)
2237 gfc_add_modify (&loop->pre, loop->to[0], tmp);
2238 else
2239 loop->to[0] = tmp;
2240 }
2241
2242 if (TREE_USED (offsetvar))
2243 pushdecl (offsetvar);
2244 else
2245 gcc_assert (INTEGER_CST_P (offset));
2246
2247 #if 0
2248 /* Disable bound checking for now because it's probably broken. */
2249 if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
2250 {
2251 gcc_unreachable ();
2252 }
2253 #endif
2254
2255 finish:
2256 /* Restore old values of globals. */
2257 first_len = old_first_len;
2258 first_len_val = old_first_len_val;
2259 typespec_chararray_ctor = old_typespec_chararray_ctor;
2260 }
2261
2262
2263 /* INFO describes a GFC_SS_SECTION in loop LOOP, and this function is
2264 called after evaluating all of INFO's vector dimensions. Go through
2265 each such vector dimension and see if we can now fill in any missing
2266 loop bounds. */
2267
2268 static void
2269 set_vector_loop_bounds (gfc_ss * ss)
2270 {
2271 gfc_loopinfo *loop;
2272 gfc_array_info *info;
2273 gfc_se se;
2274 tree tmp;
2275 tree desc;
2276 tree zero;
2277 int n;
2278 int dim;
2279
2280 info = &ss->info->data.array;
2281
2282 for (; ss; ss = ss->parent)
2283 {
2284 loop = ss->loop;
2285
2286 for (n = 0; n < loop->dimen; n++)
2287 {
2288 dim = ss->dim[n];
2289 if (info->ref->u.ar.dimen_type[dim] != DIMEN_VECTOR
2290 || loop->to[n] != NULL)
2291 continue;
2292
2293 /* Loop variable N indexes vector dimension DIM, and we don't
2294 yet know the upper bound of loop variable N. Set it to the
2295 difference between the vector's upper and lower bounds. */
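	  /* E.g. for a vector subscript whose descriptor has bounds 1:5, the
	     scalarization loop runs from 0 to 5 - 1 = 4.  */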
2296 gcc_assert (loop->from[n] == gfc_index_zero_node);
2297 gcc_assert (info->subscript[dim]
2298 && info->subscript[dim]->info->type == GFC_SS_VECTOR);
2299
2300 gfc_init_se (&se, NULL);
2301 desc = info->subscript[dim]->info->data.array.descriptor;
2302 zero = gfc_rank_cst[0];
2303 tmp = fold_build2_loc (input_location, MINUS_EXPR,
2304 gfc_array_index_type,
2305 gfc_conv_descriptor_ubound_get (desc, zero),
2306 gfc_conv_descriptor_lbound_get (desc, zero));
2307 tmp = gfc_evaluate_now (tmp, &loop->pre);
2308 loop->to[n] = tmp;
2309 }
2310 }
2311 }
2312
2313
2314 /* Add the pre and post chains for all the scalar expressions in a SS chain
2315 to loop. This is called after the loop parameters have been calculated,
2316 but before the actual scalarizing loops. */
2317
2318 static void
2319 gfc_add_loop_ss_code (gfc_loopinfo * loop, gfc_ss * ss, bool subscript,
2320 locus * where)
2321 {
2322 gfc_loopinfo *nested_loop;
2323 gfc_se se;
2324 gfc_ss_info *ss_info;
2325 gfc_array_info *info;
2326 gfc_expr *expr;
2327 bool skip_nested = false;
2328 int n;
2329
2330 /* TODO: This can generate bad code if there are ordering dependencies,
2331 e.g., a callee-allocated function and an unknown-size constructor. */
2332 gcc_assert (ss != NULL);
2333
2334 for (; ss != gfc_ss_terminator; ss = ss->loop_chain)
2335 {
2336 gcc_assert (ss);
2337
2338 /* Cross-loop arrays are handled from within the innermost loop. */
2339 if (ss->nested_ss != NULL)
2340 continue;
2341
2342 ss_info = ss->info;
2343 expr = ss_info->expr;
2344 info = &ss_info->data.array;
2345
2346 switch (ss_info->type)
2347 {
2348 case GFC_SS_SCALAR:
2349 /* Scalar expression. Evaluate this now. This includes elemental
2350 dimension indices, but not array section bounds. */
2351 gfc_init_se (&se, NULL);
2352 gfc_conv_expr (&se, expr);
2353 gfc_add_block_to_block (&loop->pre, &se.pre);
2354
2355 if (expr->ts.type != BT_CHARACTER)
2356 {
2357 /* Move the evaluation of scalar expressions outside the
2358 scalarization loop, except for WHERE assignments. */
2359 if (subscript)
2360 se.expr = convert(gfc_array_index_type, se.expr);
2361 if (!ss_info->where)
2362 se.expr = gfc_evaluate_now (se.expr, &loop->pre);
2363 gfc_add_block_to_block (&loop->pre, &se.post);
2364 }
2365 else
2366 gfc_add_block_to_block (&loop->post, &se.post);
2367
2368 ss_info->data.scalar.value = se.expr;
2369 ss_info->string_length = se.string_length;
2370 break;
2371
2372 case GFC_SS_REFERENCE:
2373 /* Scalar argument to elemental procedure. Evaluate this
2374 now. */
2375 gfc_init_se (&se, NULL);
2376 gfc_conv_expr (&se, expr);
2377 gfc_add_block_to_block (&loop->pre, &se.pre);
2378 gfc_add_block_to_block (&loop->post, &se.post);
2379
2380 ss_info->data.scalar.value = gfc_evaluate_now (se.expr, &loop->pre);
2381 ss_info->string_length = se.string_length;
2382 break;
2383
2384 case GFC_SS_SECTION:
2385 /* Add the expressions for scalar and vector subscripts. */
2386 for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
2387 if (info->subscript[n])
2388 {
2389 gfc_add_loop_ss_code (loop, info->subscript[n], true, where);
2390 /* The recursive call will have taken care of the nested loops.
2391 No need to do it twice. */
2392 skip_nested = true;
2393 }
2394
2395 set_vector_loop_bounds (ss);
2396 break;
2397
2398 case GFC_SS_VECTOR:
2399 /* Get the vector's descriptor and store it in SS. */
2400 gfc_init_se (&se, NULL);
2401 gfc_conv_expr_descriptor (&se, expr, gfc_walk_expr (expr));
2402 gfc_add_block_to_block (&loop->pre, &se.pre);
2403 gfc_add_block_to_block (&loop->post, &se.post);
2404 info->descriptor = se.expr;
2405 break;
2406
2407 case GFC_SS_INTRINSIC:
2408 gfc_add_intrinsic_ss_code (loop, ss);
2409 break;
2410
2411 case GFC_SS_FUNCTION:
2412 /* Array function return value. We call the function and save its
2413 result in a temporary for use inside the loop. */
2414 gfc_init_se (&se, NULL);
2415 se.loop = loop;
2416 se.ss = ss;
2417 gfc_conv_expr (&se, expr);
2418 gfc_add_block_to_block (&loop->pre, &se.pre);
2419 gfc_add_block_to_block (&loop->post, &se.post);
2420 ss_info->string_length = se.string_length;
2421 break;
2422
2423 case GFC_SS_CONSTRUCTOR:
2424 if (expr->ts.type == BT_CHARACTER
2425 && ss_info->string_length == NULL
2426 && expr->ts.u.cl
2427 && expr->ts.u.cl->length)
2428 {
2429 gfc_init_se (&se, NULL);
2430 gfc_conv_expr_type (&se, expr->ts.u.cl->length,
2431 gfc_charlen_type_node);
2432 ss_info->string_length = se.expr;
2433 gfc_add_block_to_block (&loop->pre, &se.pre);
2434 gfc_add_block_to_block (&loop->post, &se.post);
2435 }
2436 trans_array_constructor (ss, where);
2437 break;
2438
2439 case GFC_SS_TEMP:
2440 case GFC_SS_COMPONENT:
2441 /* Do nothing. These are handled elsewhere. */
2442 break;
2443
2444 default:
2445 gcc_unreachable ();
2446 }
2447 }
2448
2449 if (!skip_nested)
2450 for (nested_loop = loop->nested; nested_loop;
2451 nested_loop = nested_loop->next)
2452 gfc_add_loop_ss_code (nested_loop, nested_loop->ss, subscript, where);
2453 }
2454
2455
2456 /* Translate expressions for the descriptor and data pointer of an SS. */
2457 /*GCC ARRAYS*/
2458
2459 static void
2460 gfc_conv_ss_descriptor (stmtblock_t * block, gfc_ss * ss, int base)
2461 {
2462 gfc_se se;
2463 gfc_ss_info *ss_info;
2464 gfc_array_info *info;
2465 tree tmp;
2466
2467 ss_info = ss->info;
2468 info = &ss_info->data.array;
2469
2470 /* Get the descriptor for the array to be scalarized. */
2471 gcc_assert (ss_info->expr->expr_type == EXPR_VARIABLE);
2472 gfc_init_se (&se, NULL);
2473 se.descriptor_only = 1;
2474 gfc_conv_expr_lhs (&se, ss_info->expr);
2475 gfc_add_block_to_block (block, &se.pre);
2476 info->descriptor = se.expr;
2477 ss_info->string_length = se.string_length;
2478
2479 if (base)
2480 {
2481 /* Also the data pointer. */
2482 tmp = gfc_conv_array_data (se.expr);
2483 /* If this is a variable or address of a variable we use it directly.
2484 Otherwise we must evaluate it now to avoid breaking dependency
2485 analysis by pulling the expressions for elemental array indices
2486 inside the loop. */
2487 if (!(DECL_P (tmp)
2488 || (TREE_CODE (tmp) == ADDR_EXPR
2489 && DECL_P (TREE_OPERAND (tmp, 0)))))
2490 tmp = gfc_evaluate_now (tmp, block);
2491 info->data = tmp;
2492
2493 tmp = gfc_conv_array_offset (se.expr);
2494 info->offset = gfc_evaluate_now (tmp, block);
2495
2496 /* Make absolutely sure that the saved_offset is indeed saved
2497 so that the variable is still accessible after the loops
2498 are translated. */
2499 info->saved_offset = info->offset;
2500 }
2501 }
2502
2503
2504 /* Initialize a gfc_loopinfo structure. */
2505
2506 void
2507 gfc_init_loopinfo (gfc_loopinfo * loop)
2508 {
2509 int n;
2510
2511 memset (loop, 0, sizeof (gfc_loopinfo));
2512 gfc_init_block (&loop->pre);
2513 gfc_init_block (&loop->post);
2514
2515 /* Initially scalarize in order and default to no loop reversal. */
2516 for (n = 0; n < GFC_MAX_DIMENSIONS; n++)
2517 {
2518 loop->order[n] = n;
2519 loop->reverse[n] = GFC_INHIBIT_REVERSE;
2520 }
2521
2522 loop->ss = gfc_ss_terminator;
2523 }
2524
2525
2526 /* Copies the loop variable info to a gfc_se structure. Does not copy the SS
2527 chain. */
2528
2529 void
2530 gfc_copy_loopinfo_to_se (gfc_se * se, gfc_loopinfo * loop)
2531 {
2532 se->loop = loop;
2533 }
2534
2535
2536 /* Return an expression for the data pointer of an array. */
2537
2538 tree
2539 gfc_conv_array_data (tree descriptor)
2540 {
2541 tree type;
2542
2543 type = TREE_TYPE (descriptor);
2544 if (GFC_ARRAY_TYPE_P (type))
2545 {
2546 if (TREE_CODE (type) == POINTER_TYPE)
2547 return descriptor;
2548 else
2549 {
2550 /* Descriptorless arrays. */
2551 return gfc_build_addr_expr (NULL_TREE, descriptor);
2552 }
2553 }
2554 else
2555 return gfc_conv_descriptor_data_get (descriptor);
2556 }
2557
2558
2559 /* Return an expression for the base offset of an array. */
2560
2561 tree
2562 gfc_conv_array_offset (tree descriptor)
2563 {
2564 tree type;
2565
2566 type = TREE_TYPE (descriptor);
2567 if (GFC_ARRAY_TYPE_P (type))
2568 return GFC_TYPE_ARRAY_OFFSET (type);
2569 else
2570 return gfc_conv_descriptor_offset_get (descriptor);
2571 }
2572
2573
2574 /* Get an expression for the array stride. */
2575
2576 tree
2577 gfc_conv_array_stride (tree descriptor, int dim)
2578 {
2579 tree tmp;
2580 tree type;
2581
2582 type = TREE_TYPE (descriptor);
2583
2584 /* For descriptorless arrays the stride is taken from the array type. */
2585 tmp = GFC_TYPE_ARRAY_STRIDE (type, dim);
2586 if (tmp != NULL_TREE)
2587 return tmp;
2588
2589 tmp = gfc_conv_descriptor_stride_get (descriptor, gfc_rank_cst[dim]);
2590 return tmp;
2591 }
2592
2593
2594 /* Like gfc_conv_array_stride, but for the lower bound. */
2595
2596 tree
2597 gfc_conv_array_lbound (tree descriptor, int dim)
2598 {
2599 tree tmp;
2600 tree type;
2601
2602 type = TREE_TYPE (descriptor);
2603
2604 tmp = GFC_TYPE_ARRAY_LBOUND (type, dim);
2605 if (tmp != NULL_TREE)
2606 return tmp;
2607
2608 tmp = gfc_conv_descriptor_lbound_get (descriptor, gfc_rank_cst[dim]);
2609 return tmp;
2610 }
2611
2612
2613 /* Like gfc_conv_array_stride, but for the upper bound. */
2614
2615 tree
2616 gfc_conv_array_ubound (tree descriptor, int dim)
2617 {
2618 tree tmp;
2619 tree type;
2620
2621 type = TREE_TYPE (descriptor);
2622
2623 tmp = GFC_TYPE_ARRAY_UBOUND (type, dim);
2624 if (tmp != NULL_TREE)
2625 return tmp;
2626
2627 /* This should only ever happen when passing an assumed shape array
2628 as an actual parameter. The value will never be used. */
2629 if (GFC_ARRAY_TYPE_P (TREE_TYPE (descriptor)))
2630 return gfc_index_zero_node;
2631
2632 tmp = gfc_conv_descriptor_ubound_get (descriptor, gfc_rank_cst[dim]);
2633 return tmp;
2634 }
2635
2636
2637 /* Generate code to perform an array index bound check. */
2638
2639 static tree
2640 trans_array_bound_check (gfc_se * se, gfc_ss *ss, tree index, int n,
2641 locus * where, bool check_upper)
2642 {
2643 tree fault;
2644 tree tmp_lo, tmp_up;
2645 tree descriptor;
2646 char *msg;
2647 const char * name = NULL;
2648
2649 if (!(gfc_option.rtcheck & GFC_RTCHECK_BOUNDS))
2650 return index;
2651
2652 descriptor = ss->info->data.array.descriptor;
2653
2654 index = gfc_evaluate_now (index, &se->pre);
2655
2656 /* Find a name to use in the error message. */
2657 name = ss->info->expr->symtree->n.sym->name;
2658 gcc_assert (name != NULL);
2659
2660 if (TREE_CODE (descriptor) == VAR_DECL)
2661 name = IDENTIFIER_POINTER (DECL_NAME (descriptor));
2662
2663 /* If the upper bound is present, include both bounds in the error message. */
2664 if (check_upper)
2665 {
2666 tmp_lo = gfc_conv_array_lbound (descriptor, n);
2667 tmp_up = gfc_conv_array_ubound (descriptor, n);
2668
2669 if (name)
2670 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
2671 "outside of expected range (%%ld:%%ld)", n+1, name);
2672 else
2673 asprintf (&msg, "Index '%%ld' of dimension %d "
2674 "outside of expected range (%%ld:%%ld)", n+1);
2675
2676 fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
2677 index, tmp_lo);
2678 gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
2679 fold_convert (long_integer_type_node, index),
2680 fold_convert (long_integer_type_node, tmp_lo),
2681 fold_convert (long_integer_type_node, tmp_up));
2682 fault = fold_build2_loc (input_location, GT_EXPR, boolean_type_node,
2683 index, tmp_up);
2684 gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
2685 fold_convert (long_integer_type_node, index),
2686 fold_convert (long_integer_type_node, tmp_lo),
2687 fold_convert (long_integer_type_node, tmp_up));
2688 free (msg);
2689 }
2690 else
2691 {
2692 tmp_lo = gfc_conv_array_lbound (descriptor, n);
2693
2694 if (name)
2695 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
2696 "below lower bound of %%ld", n+1, name);
2697 else
2698 asprintf (&msg, "Index '%%ld' of dimension %d "
2699 "below lower bound of %%ld", n+1);
2700
2701 fault = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
2702 index, tmp_lo);
2703 gfc_trans_runtime_check (true, false, fault, &se->pre, where, msg,
2704 fold_convert (long_integer_type_node, index),
2705 fold_convert (long_integer_type_node, tmp_lo));
2706 free (msg);
2707 }
2708
2709 return index;
2710 }
2711
2712
2713 /* Return the offset for an index. Performs bound checking for elemental
2714 dimensions. Single element references are processed separately.
2715 DIM is the array dimension, I is the loop dimension. */
2716
2717 static tree
2718 conv_array_index_offset (gfc_se * se, gfc_ss * ss, int dim, int i,
2719 gfc_array_ref * ar, tree stride)
2720 {
2721 gfc_array_info *info;
2722 tree index;
2723 tree desc;
2724 tree data;
2725
2726 info = &ss->info->data.array;
2727
2728 /* Get the index into the array for this dimension. */
2729 if (ar)
2730 {
2731 gcc_assert (ar->type != AR_ELEMENT);
2732 switch (ar->dimen_type[dim])
2733 {
2734 case DIMEN_THIS_IMAGE:
2735 gcc_unreachable ();
2736 break;
2737 case DIMEN_ELEMENT:
2738 /* Elemental dimension. */
2739 gcc_assert (info->subscript[dim]
2740 && info->subscript[dim]->info->type == GFC_SS_SCALAR);
2741 /* We've already translated this value outside the loop. */
2742 index = info->subscript[dim]->info->data.scalar.value;
2743
2744 index = trans_array_bound_check (se, ss, index, dim, &ar->where,
2745 ar->as->type != AS_ASSUMED_SIZE
2746 || dim < ar->dimen - 1);
2747 break;
2748
2749 case DIMEN_VECTOR:
2750 gcc_assert (info && se->loop);
2751 gcc_assert (info->subscript[dim]
2752 && info->subscript[dim]->info->type == GFC_SS_VECTOR);
2753 desc = info->subscript[dim]->info->data.array.descriptor;
2754
2755 /* Get a zero-based index into the vector. */
2756 index = fold_build2_loc (input_location, MINUS_EXPR,
2757 gfc_array_index_type,
2758 se->loop->loopvar[i], se->loop->from[i]);
2759
2760 /* Multiply the index by the stride. */
2761 index = fold_build2_loc (input_location, MULT_EXPR,
2762 gfc_array_index_type,
2763 index, gfc_conv_array_stride (desc, 0));
2764
2765 /* Read the vector to get an index into info->descriptor. */
2766 data = build_fold_indirect_ref_loc (input_location,
2767 gfc_conv_array_data (desc));
2768 index = gfc_build_array_ref (data, index, NULL);
2769 index = gfc_evaluate_now (index, &se->pre);
2770 index = fold_convert (gfc_array_index_type, index);
2771
2772 /* Do any bounds checking on the final info->descriptor index. */
2773 index = trans_array_bound_check (se, ss, index, dim, &ar->where,
2774 ar->as->type != AS_ASSUMED_SIZE
2775 || dim < ar->dimen - 1);
2776 break;
2777
2778 case DIMEN_RANGE:
2779 /* Scalarized dimension. */
2780 gcc_assert (info && se->loop);
2781
2782 /* Multiply the loop variable by the stride and delta. */
2783 index = se->loop->loopvar[i];
2784 if (!integer_onep (info->stride[dim]))
2785 index = fold_build2_loc (input_location, MULT_EXPR,
2786 gfc_array_index_type, index,
2787 info->stride[dim]);
2788 if (!integer_zerop (info->delta[dim]))
2789 index = fold_build2_loc (input_location, PLUS_EXPR,
2790 gfc_array_index_type, index,
2791 info->delta[dim]);
2792 break;
2793
2794 default:
2795 gcc_unreachable ();
2796 }
2797 }
2798 else
2799 {
2800 /* Temporary array or derived type component. */
2801 gcc_assert (se->loop);
2802 index = se->loop->loopvar[se->loop->order[i]];
2803
2804 /* Pointer functions can have stride[0] different from unity.
2805 Use the stride returned by the function call and stored in
2806 the descriptor for the temporary. */
2807 if (se->ss && se->ss->info->type == GFC_SS_FUNCTION
2808 && se->ss->info->expr
2809 && se->ss->info->expr->symtree
2810 && se->ss->info->expr->symtree->n.sym->result
2811 && se->ss->info->expr->symtree->n.sym->result->attr.pointer)
2812 stride = gfc_conv_descriptor_stride_get (info->descriptor,
2813 gfc_rank_cst[dim]);
2814
2815 if (!integer_zerop (info->delta[dim]))
2816 index = fold_build2_loc (input_location, PLUS_EXPR,
2817 gfc_array_index_type, index, info->delta[dim]);
2818 }
2819
2820 /* Multiply by the stride. */
2821 if (!integer_onep (stride))
2822 index = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
2823 index, stride);
2824
2825 return index;
2826 }
2827
2828
2829 /* Build a scalarized reference to an array. */
2830
2831 static void
2832 gfc_conv_scalarized_array_ref (gfc_se * se, gfc_array_ref * ar)
2833 {
2834 gfc_array_info *info;
2835 tree decl = NULL_TREE;
2836 tree index;
2837 tree tmp;
2838 gfc_ss *ss;
2839 gfc_expr *expr;
2840 int n;
2841
2842 ss = se->ss;
2843 expr = ss->info->expr;
2844 info = &ss->info->data.array;
2845 if (ar)
2846 n = se->loop->order[0];
2847 else
2848 n = 0;
2849
2850 index = conv_array_index_offset (se, ss, ss->dim[n], n, ar, info->stride0);
2851 /* Add the offset for this dimension to the stored offset for all other
2852 dimensions. */
2853 if (!integer_zerop (info->offset))
2854 index = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
2855 index, info->offset);
2856
2857 if (expr && is_subref_array (expr))
2858 decl = expr->symtree->n.sym->backend_decl;
2859
2860 tmp = build_fold_indirect_ref_loc (input_location, info->data);
2861 se->expr = gfc_build_array_ref (tmp, index, decl);
2862 }
2863
2864
2865 /* Translate access of temporary array. */
2866
2867 void
2868 gfc_conv_tmp_array_ref (gfc_se * se)
2869 {
2870 se->string_length = se->ss->info->string_length;
2871 gfc_conv_scalarized_array_ref (se, NULL);
2872 gfc_advance_se_ss_chain (se);
2873 }
2874
2875 /* Add T to the offset pair *OFFSET, *CST_OFFSET. */
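/* INTEGER_CST terms are folded into *CST_OFFSET; everything else is
   accumulated in *OFFSET, and the two are combined once at the end of
   gfc_conv_array_ref.  */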
2876
2877 static void
2878 add_to_offset (tree *cst_offset, tree *offset, tree t)
2879 {
2880 if (TREE_CODE (t) == INTEGER_CST)
2881 *cst_offset = int_const_binop (PLUS_EXPR, *cst_offset, t);
2882 else
2883 {
2884 if (!integer_zerop (*offset))
2885 *offset = fold_build2_loc (input_location, PLUS_EXPR,
2886 gfc_array_index_type, *offset, t);
2887 else
2888 *offset = t;
2889 }
2890 }
2891
2892 /* Build an array reference. se->expr already holds the array descriptor.
2893 This should be either a variable, indirect variable reference or component
2894 reference. For arrays which do not have a descriptor, se->expr will be
2895 the data pointer.
2896 a(i, j, k) = base[offset + i * stride[0] + j * stride[1] + k * stride[2]]. */
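   /* For illustration, for a 10x10 array with unit lower bounds the
      descriptor holds stride = (1, 10) and offset = -(1*1 + 1*10) = -11,
      so a(3, 4) is addressed as base[-11 + 3*1 + 4*10] = base[32].  */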
2897
2898 void
2899 gfc_conv_array_ref (gfc_se * se, gfc_array_ref * ar, gfc_symbol * sym,
2900 locus * where)
2901 {
2902 int n;
2903 tree offset, cst_offset;
2904 tree tmp;
2905 tree stride;
2906 gfc_se indexse;
2907 gfc_se tmpse;
2908
2909 if (ar->dimen == 0)
2910 {
2911 gcc_assert (ar->codimen);
2912
2913 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (se->expr)))
2914 se->expr = build_fold_indirect_ref (gfc_conv_array_data (se->expr));
2915 else
2916 {
2917 if (GFC_ARRAY_TYPE_P (TREE_TYPE (se->expr))
2918 && TREE_CODE (TREE_TYPE (se->expr)) == POINTER_TYPE)
2919 se->expr = build_fold_indirect_ref_loc (input_location, se->expr);
2920
2921 /* Use the actual tree type and not the wrapped coarray. */
2922 if (!se->want_pointer)
2923 se->expr = fold_convert (TYPE_MAIN_VARIANT (TREE_TYPE (se->expr)),
2924 se->expr);
2925 }
2926
2927 return;
2928 }
2929
2930 /* Handle scalarized references separately. */
2931 if (ar->type != AR_ELEMENT)
2932 {
2933 gfc_conv_scalarized_array_ref (se, ar);
2934 gfc_advance_se_ss_chain (se);
2935 return;
2936 }
2937
2938 cst_offset = offset = gfc_index_zero_node;
2939 add_to_offset (&cst_offset, &offset, gfc_conv_array_offset (se->expr));
2940
2941 /* Calculate the offsets from all the dimensions. Make sure to associate
2942 the final offset so that we form a chain of loop invariant summands. */
2943 for (n = ar->dimen - 1; n >= 0; n--)
2944 {
2945 /* Calculate the index for this dimension. */
2946 gfc_init_se (&indexse, se);
2947 gfc_conv_expr_type (&indexse, ar->start[n], gfc_array_index_type);
2948 gfc_add_block_to_block (&se->pre, &indexse.pre);
2949
2950 if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
2951 {
2952 /* Check array bounds. */
2953 tree cond;
2954 char *msg;
2955
2956 /* Evaluate the indexse.expr only once. */
2957 indexse.expr = save_expr (indexse.expr);
2958
2959 /* Lower bound. */
2960 tmp = gfc_conv_array_lbound (se->expr, n);
2961 if (sym->attr.temporary)
2962 {
2963 gfc_init_se (&tmpse, se);
2964 gfc_conv_expr_type (&tmpse, ar->as->lower[n],
2965 gfc_array_index_type);
2966 gfc_add_block_to_block (&se->pre, &tmpse.pre);
2967 tmp = tmpse.expr;
2968 }
2969
2970 cond = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
2971 indexse.expr, tmp);
2972 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
2973 "below lower bound of %%ld", n+1, sym->name);
2974 gfc_trans_runtime_check (true, false, cond, &se->pre, where, msg,
2975 fold_convert (long_integer_type_node,
2976 indexse.expr),
2977 fold_convert (long_integer_type_node, tmp));
2978 free (msg);
2979
2980 /* Upper bound, but not for the last dimension of assumed-size
2981 arrays. */
2982 if (n < ar->dimen - 1 || ar->as->type != AS_ASSUMED_SIZE)
2983 {
2984 tmp = gfc_conv_array_ubound (se->expr, n);
2985 if (sym->attr.temporary)
2986 {
2987 gfc_init_se (&tmpse, se);
2988 gfc_conv_expr_type (&tmpse, ar->as->upper[n],
2989 gfc_array_index_type);
2990 gfc_add_block_to_block (&se->pre, &tmpse.pre);
2991 tmp = tmpse.expr;
2992 }
2993
2994 cond = fold_build2_loc (input_location, GT_EXPR,
2995 boolean_type_node, indexse.expr, tmp);
2996 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
2997 "above upper bound of %%ld", n+1, sym->name);
2998 gfc_trans_runtime_check (true, false, cond, &se->pre, where, msg,
2999 fold_convert (long_integer_type_node,
3000 indexse.expr),
3001 fold_convert (long_integer_type_node, tmp));
3002 free (msg);
3003 }
3004 }
3005
3006 /* Multiply the index by the stride. */
3007 stride = gfc_conv_array_stride (se->expr, n);
3008 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
3009 indexse.expr, stride);
3010
3011 /* And add it to the total. */
3012 add_to_offset (&cst_offset, &offset, tmp);
3013 }
3014
3015 if (!integer_zerop (cst_offset))
3016 offset = fold_build2_loc (input_location, PLUS_EXPR,
3017 gfc_array_index_type, offset, cst_offset);
3018
3019 /* Access the calculated element. */
3020 tmp = gfc_conv_array_data (se->expr);
3021 tmp = build_fold_indirect_ref (tmp);
3022 se->expr = gfc_build_array_ref (tmp, offset, sym->backend_decl);
3023 }
3024
3025
3026 /* Add the offset corresponding to the array's ARRAY_DIM dimension and the
3027 loop's LOOP_DIM dimension (if any) to the array's offset. */
3028
3029 static void
3030 add_array_offset (stmtblock_t *pblock, gfc_loopinfo *loop, gfc_ss *ss,
3031 gfc_array_ref *ar, int array_dim, int loop_dim)
3032 {
3033 gfc_se se;
3034 gfc_array_info *info;
3035 tree stride, index;
3036
3037 info = &ss->info->data.array;
3038
3039 gfc_init_se (&se, NULL);
3040 se.loop = loop;
3041 se.expr = info->descriptor;
3042 stride = gfc_conv_array_stride (info->descriptor, array_dim);
3043 index = conv_array_index_offset (&se, ss, array_dim, loop_dim, ar, stride);
3044 gfc_add_block_to_block (pblock, &se.pre);
3045
3046 info->offset = fold_build2_loc (input_location, PLUS_EXPR,
3047 gfc_array_index_type,
3048 info->offset, index);
3049 info->offset = gfc_evaluate_now (info->offset, pblock);
3050 }
3051
3052
3053 /* Generate the code to be executed immediately before entering a
3054 scalarization loop. */
3055
3056 static void
3057 gfc_trans_preloop_setup (gfc_loopinfo * loop, int dim, int flag,
3058 stmtblock_t * pblock)
3059 {
3060 tree stride;
3061 gfc_ss_info *ss_info;
3062 gfc_array_info *info;
3063 gfc_ss_type ss_type;
3064 gfc_ss *ss;
3065 gfc_array_ref *ar;
3066 int i;
3067
3068 /* This code will be executed before entering the scalarization loop
3069 for this dimension. */
3070 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3071 {
3072 ss_info = ss->info;
3073
3074 if ((ss_info->useflags & flag) == 0)
3075 continue;
3076
3077 ss_type = ss_info->type;
3078 if (ss_type != GFC_SS_SECTION
3079 && ss_type != GFC_SS_FUNCTION
3080 && ss_type != GFC_SS_CONSTRUCTOR
3081 && ss_type != GFC_SS_COMPONENT)
3082 continue;
3083
3084 info = &ss_info->data.array;
3085
3086 gcc_assert (dim < ss->dimen);
3087 gcc_assert (ss->dimen == loop->dimen);
3088
3089 if (info->ref)
3090 ar = &info->ref->u.ar;
3091 else
3092 ar = NULL;
3093
3094 if (dim == loop->dimen - 1)
3095 i = 0;
3096 else
3097 i = dim + 1;
3098
3099 /* For the time being, there is no loop reordering. */
3100 gcc_assert (i == loop->order[i]);
3101 i = loop->order[i];
3102
3103 if (dim == loop->dimen - 1)
3104 {
3105 stride = gfc_conv_array_stride (info->descriptor, ss->dim[i]);
3106
3107 /* Calculate the stride of the innermost loop. Hopefully this will
3108 allow the backend optimizers to do their stuff more effectively.
3109 */
3110 info->stride0 = gfc_evaluate_now (stride, pblock);
3111
3112 /* For the outermost loop calculate the offset due to any
3113 elemental dimensions. It will have been initialized with the
3114 base offset of the array. */
3115 if (info->ref)
3116 {
3117 for (i = 0; i < ar->dimen; i++)
3118 {
3119 if (ar->dimen_type[i] != DIMEN_ELEMENT)
3120 continue;
3121
3122 add_array_offset (pblock, loop, ss, ar, i, /* unused */ -1);
3123 }
3124 }
3125 }
3126 else
3127 /* Add the offset for the previous loop dimension. */
3128 add_array_offset (pblock, loop, ss, ar, ss->dim[i], i);
3129
3130 /* Remember this offset for the second loop. */
3131 if (dim == loop->temp_dim - 1)
3132 info->saved_offset = info->offset;
3133 }
3134 }
3135
3136
3137 /* Start a scalarized expression. Creates a scope and declares loop
3138 variables. */
3139
3140 void
3141 gfc_start_scalarized_body (gfc_loopinfo * loop, stmtblock_t * pbody)
3142 {
3143 int dim;
3144 int n;
3145 int flags;
3146
3147 gcc_assert (!loop->array_parameter);
3148
3149 for (dim = loop->dimen - 1; dim >= 0; dim--)
3150 {
3151 n = loop->order[dim];
3152
3153 gfc_start_block (&loop->code[n]);
3154
3155 /* Create the loop variable. */
3156 loop->loopvar[n] = gfc_create_var (gfc_array_index_type, "S");
3157
3158 if (dim < loop->temp_dim)
3159 flags = 3;
3160 else
3161 flags = 1;
3162 /* Calculate values that will be constant within this loop. */
3163 gfc_trans_preloop_setup (loop, dim, flags, &loop->code[n]);
3164 }
3165 gfc_start_block (pbody);
3166 }
3167
3168
3169 /* Generates the actual loop code for a scalarization loop. */
3170
3171 void
3172 gfc_trans_scalarized_loop_end (gfc_loopinfo * loop, int n,
3173 stmtblock_t * pbody)
3174 {
3175 stmtblock_t block;
3176 tree cond;
3177 tree tmp;
3178 tree loopbody;
3179 tree exit_label;
3180 tree stmt;
3181 tree init;
3182 tree incr;
3183
3184 if ((ompws_flags & (OMPWS_WORKSHARE_FLAG | OMPWS_SCALARIZER_WS))
3185 == (OMPWS_WORKSHARE_FLAG | OMPWS_SCALARIZER_WS)
3186 && n == loop->dimen - 1)
3187 {
3188 /* We create an OMP_FOR construct for the outermost scalarized loop. */
3189 init = make_tree_vec (1);
3190 cond = make_tree_vec (1);
3191 incr = make_tree_vec (1);
3192
3193 /* The CYCLE statement is implemented with a goto. An EXIT statement must
3194 not be present for this loop. */
3195 exit_label = gfc_build_label_decl (NULL_TREE);
3196 TREE_USED (exit_label) = 1;
3197
3198 /* Label for cycle statements (if needed). */
3199 tmp = build1_v (LABEL_EXPR, exit_label);
3200 gfc_add_expr_to_block (pbody, tmp);
3201
3202 stmt = make_node (OMP_FOR);
3203
3204 TREE_TYPE (stmt) = void_type_node;
3205 OMP_FOR_BODY (stmt) = loopbody = gfc_finish_block (pbody);
3206
3207 OMP_FOR_CLAUSES (stmt) = build_omp_clause (input_location,
3208 OMP_CLAUSE_SCHEDULE);
3209 OMP_CLAUSE_SCHEDULE_KIND (OMP_FOR_CLAUSES (stmt))
3210 = OMP_CLAUSE_SCHEDULE_STATIC;
3211 if (ompws_flags & OMPWS_NOWAIT)
3212 OMP_CLAUSE_CHAIN (OMP_FOR_CLAUSES (stmt))
3213 = build_omp_clause (input_location, OMP_CLAUSE_NOWAIT);
3214
3215 /* Initialize the loopvar. */
3216 TREE_VEC_ELT (init, 0) = build2_v (MODIFY_EXPR, loop->loopvar[n],
3217 loop->from[n]);
3218 OMP_FOR_INIT (stmt) = init;
3219 /* The exit condition. */
3220 TREE_VEC_ELT (cond, 0) = build2_loc (input_location, LE_EXPR,
3221 boolean_type_node,
3222 loop->loopvar[n], loop->to[n]);
3223 SET_EXPR_LOCATION (TREE_VEC_ELT (cond, 0), input_location);
3224 OMP_FOR_COND (stmt) = cond;
3225 /* Increment the loopvar. */
3226 tmp = build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
3227 loop->loopvar[n], gfc_index_one_node);
3228 TREE_VEC_ELT (incr, 0) = fold_build2_loc (input_location, MODIFY_EXPR,
3229 void_type_node, loop->loopvar[n], tmp);
3230 OMP_FOR_INCR (stmt) = incr;
3231
3232 ompws_flags &= ~OMPWS_CURR_SINGLEUNIT;
3233 gfc_add_expr_to_block (&loop->code[n], stmt);
3234 }
3235 else
3236 {
3237 bool reverse_loop = (loop->reverse[n] == GFC_REVERSE_SET)
3238 && (loop->temp_ss == NULL);
3239
3240 loopbody = gfc_finish_block (pbody);
3241
3242 if (reverse_loop)
3243 {
3244 tmp = loop->from[n];
3245 loop->from[n] = loop->to[n];
3246 loop->to[n] = tmp;
3247 }
3248
3249 /* Initialize the loopvar. */
3250 if (loop->loopvar[n] != loop->from[n])
3251 gfc_add_modify (&loop->code[n], loop->loopvar[n], loop->from[n]);
3252
3253 exit_label = gfc_build_label_decl (NULL_TREE);
3254
3255 /* Generate the loop body. */
3256 gfc_init_block (&block);
3257
3258 /* The exit condition. */
3259 cond = fold_build2_loc (input_location, reverse_loop ? LT_EXPR : GT_EXPR,
3260 boolean_type_node, loop->loopvar[n], loop->to[n]);
3261 tmp = build1_v (GOTO_EXPR, exit_label);
3262 TREE_USED (exit_label) = 1;
3263 tmp = build3_v (COND_EXPR, cond, tmp, build_empty_stmt (input_location));
3264 gfc_add_expr_to_block (&block, tmp);
3265
3266 /* The main body. */
3267 gfc_add_expr_to_block (&block, loopbody);
3268
3269 /* Increment the loopvar. */
3270 tmp = fold_build2_loc (input_location,
3271 reverse_loop ? MINUS_EXPR : PLUS_EXPR,
3272 gfc_array_index_type, loop->loopvar[n],
3273 gfc_index_one_node);
3274
3275 gfc_add_modify (&block, loop->loopvar[n], tmp);
3276
3277 /* Build the loop. */
3278 tmp = gfc_finish_block (&block);
3279 tmp = build1_v (LOOP_EXPR, tmp);
3280 gfc_add_expr_to_block (&loop->code[n], tmp);
3281
3282 /* Add the exit label. */
3283 tmp = build1_v (LABEL_EXPR, exit_label);
3284 gfc_add_expr_to_block (&loop->code[n], tmp);
3285 }
3286
3287 }
3288
3289
3290 /* Finishes and generates the loops for a scalarized expression. */
3291
3292 void
3293 gfc_trans_scalarizing_loops (gfc_loopinfo * loop, stmtblock_t * body)
3294 {
3295 int dim;
3296 int n;
3297 gfc_ss *ss;
3298 stmtblock_t *pblock;
3299 tree tmp;
3300
3301 pblock = body;
3302 /* Generate the loops. */
3303 for (dim = 0; dim < loop->dimen; dim++)
3304 {
3305 n = loop->order[dim];
3306 gfc_trans_scalarized_loop_end (loop, n, pblock);
3307 loop->loopvar[n] = NULL_TREE;
3308 pblock = &loop->code[n];
3309 }
3310
3311 tmp = gfc_finish_block (pblock);
3312 gfc_add_expr_to_block (&loop->pre, tmp);
3313
3314 /* Clear all the used flags. */
3315 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3316 if (ss->parent == NULL)
3317 ss->info->useflags = 0;
3318 }
3319
3320
3321 /* Finish the main body of a scalarized expression, and start the secondary
3322 copying body. */
3323
3324 void
3325 gfc_trans_scalarized_loop_boundary (gfc_loopinfo * loop, stmtblock_t * body)
3326 {
3327 int dim;
3328 int n;
3329 stmtblock_t *pblock;
3330 gfc_ss *ss;
3331
3332 pblock = body;
3333 /* We finish as many loops as are used by the temporary. */
3334 for (dim = 0; dim < loop->temp_dim - 1; dim++)
3335 {
3336 n = loop->order[dim];
3337 gfc_trans_scalarized_loop_end (loop, n, pblock);
3338 loop->loopvar[n] = NULL_TREE;
3339 pblock = &loop->code[n];
3340 }
3341
3342 /* We don't want to finish the outermost loop entirely. */
3343 n = loop->order[loop->temp_dim - 1];
3344 gfc_trans_scalarized_loop_end (loop, n, pblock);
3345
3346 /* Restore the initial offsets. */
3347 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3348 {
3349 gfc_ss_type ss_type;
3350 gfc_ss_info *ss_info;
3351
3352 ss_info = ss->info;
3353
3354 if ((ss_info->useflags & 2) == 0)
3355 continue;
3356
3357 ss_type = ss_info->type;
3358 if (ss_type != GFC_SS_SECTION
3359 && ss_type != GFC_SS_FUNCTION
3360 && ss_type != GFC_SS_CONSTRUCTOR
3361 && ss_type != GFC_SS_COMPONENT)
3362 continue;
3363
3364 ss_info->data.array.offset = ss_info->data.array.saved_offset;
3365 }
3366
3367 /* Restart all the inner loops we just finished. */
3368 for (dim = loop->temp_dim - 2; dim >= 0; dim--)
3369 {
3370 n = loop->order[dim];
3371
3372 gfc_start_block (&loop->code[n]);
3373
3374 loop->loopvar[n] = gfc_create_var (gfc_array_index_type, "Q");
3375
3376 gfc_trans_preloop_setup (loop, dim, 2, &loop->code[n]);
3377 }
3378
3379 /* Start a block for the secondary copying code. */
3380 gfc_start_block (body);
3381 }
3382
3383
3384 /* Precalculate (either lower or upper) bound of an array section.
3385 BLOCK: Block in which the (pre)calculation code will go.
3386 BOUNDS[DIM]: Where the bound value will be stored once evaluated.
3387 VALUES[DIM]: Specified bound (NULL <=> unspecified).
3388 DESC: Array descriptor from which the bound will be picked if unspecified
3389 (either lower or upper bound according to LBOUND). */
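   /* E.g. for a section a(:10), the lower bound is read from A's descriptor
      while the upper bound comes from the expression 10.  */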
3390
3391 static void
3392 evaluate_bound (stmtblock_t *block, tree *bounds, gfc_expr ** values,
3393 tree desc, int dim, bool lbound)
3394 {
3395 gfc_se se;
3396 gfc_expr * input_val = values[dim];
3397 tree *output = &bounds[dim];
3398
3399
3400 if (input_val)
3401 {
3402 /* Specified section bound. */
3403 gfc_init_se (&se, NULL);
3404 gfc_conv_expr_type (&se, input_val, gfc_array_index_type);
3405 gfc_add_block_to_block (block, &se.pre);
3406 *output = se.expr;
3407 }
3408 else
3409 {
3410 /* No specific bound specified so use the bound of the array. */
3411 *output = lbound ? gfc_conv_array_lbound (desc, dim) :
3412 gfc_conv_array_ubound (desc, dim);
3413 }
3414 *output = gfc_evaluate_now (*output, block);
3415 }
3416
3417
3418 /* Calculate the start, end and stride of an array section. */
3419
3420 static void
3421 gfc_conv_section_startstride (gfc_loopinfo * loop, gfc_ss * ss, int dim)
3422 {
3423 gfc_expr *stride = NULL;
3424 tree desc;
3425 gfc_se se;
3426 gfc_array_info *info;
3427 gfc_array_ref *ar;
3428
3429 gcc_assert (ss->info->type == GFC_SS_SECTION);
3430
3431 info = &ss->info->data.array;
3432 ar = &info->ref->u.ar;
3433
3434 if (ar->dimen_type[dim] == DIMEN_VECTOR)
3435 {
3436 /* We use a zero-based index to access the vector. */
3437 info->start[dim] = gfc_index_zero_node;
3438 info->end[dim] = NULL;
3439 info->stride[dim] = gfc_index_one_node;
3440 return;
3441 }
3442
3443 gcc_assert (ar->dimen_type[dim] == DIMEN_RANGE
3444 || ar->dimen_type[dim] == DIMEN_THIS_IMAGE);
3445 desc = info->descriptor;
3446 stride = ar->stride[dim];
3447
3448 /* Calculate the start of the range. For vector subscripts this will
3449 be the range of the vector. */
3450 evaluate_bound (&loop->pre, info->start, ar->start, desc, dim, true);
3451
3452 /* Similarly calculate the end. Although this is not used in the
3453 scalarizer, it is needed when checking bounds and where the end
3454 is an expression with side-effects. */
3455 evaluate_bound (&loop->pre, info->end, ar->end, desc, dim, false);
3456
3457 /* Calculate the stride. */
3458 if (stride == NULL)
3459 info->stride[dim] = gfc_index_one_node;
3460 else
3461 {
3462 gfc_init_se (&se, NULL);
3463 gfc_conv_expr_type (&se, stride, gfc_array_index_type);
3464 gfc_add_block_to_block (&loop->pre, &se.pre);
3465 info->stride[dim] = gfc_evaluate_now (se.expr, &loop->pre);
3466 }
3467 }
3468
3469
3470 /* Calculates the range start and stride for an SS chain. Also gets the
3471 descriptor and data pointer. The range of vector subscripts is the size
3472 of the vector. Array bounds are also checked. */
3473
3474 void
3475 gfc_conv_ss_startstride (gfc_loopinfo * loop)
3476 {
3477 int n;
3478 tree tmp;
3479 gfc_ss *ss;
3480 tree desc;
3481
3482 loop->dimen = 0;
3483 /* Determine the rank of the loop. */
3484 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3485 {
3486 switch (ss->info->type)
3487 {
3488 case GFC_SS_SECTION:
3489 case GFC_SS_CONSTRUCTOR:
3490 case GFC_SS_FUNCTION:
3491 case GFC_SS_COMPONENT:
3492 loop->dimen = ss->dimen;
3493 goto done;
3494
3495 /* As usual, lbound and ubound are exceptions! */
3496 case GFC_SS_INTRINSIC:
3497 switch (ss->info->expr->value.function.isym->id)
3498 {
3499 case GFC_ISYM_LBOUND:
3500 case GFC_ISYM_UBOUND:
3501 case GFC_ISYM_LCOBOUND:
3502 case GFC_ISYM_UCOBOUND:
3503 case GFC_ISYM_THIS_IMAGE:
3504 loop->dimen = ss->dimen;
3505 goto done;
3506
3507 default:
3508 break;
3509 }
3510
3511 default:
3512 break;
3513 }
3514 }
3515
3516 /* We should have determined the rank of the expression by now. If
3517 not, that's bad news. */
3518 gcc_unreachable ();
3519
3520 done:
3521 /* Loop over all the SS in the chain. */
3522 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3523 {
3524 gfc_ss_info *ss_info;
3525 gfc_array_info *info;
3526 gfc_expr *expr;
3527
3528 ss_info = ss->info;
3529 expr = ss_info->expr;
3530 info = &ss_info->data.array;
3531
3532 if (expr && expr->shape && !info->shape)
3533 info->shape = expr->shape;
3534
3535 switch (ss_info->type)
3536 {
3537 case GFC_SS_SECTION:
3538 /* Get the descriptor for the array. If it is a cross-loop array,
3539 we already got the descriptor in the outermost loop. */
3540 if (ss->parent == NULL)
3541 gfc_conv_ss_descriptor (&loop->pre, ss, !loop->array_parameter);
3542
3543 for (n = 0; n < ss->dimen; n++)
3544 gfc_conv_section_startstride (loop, ss, ss->dim[n]);
3545 break;
3546
3547 case GFC_SS_INTRINSIC:
3548 switch (expr->value.function.isym->id)
3549 {
3550 /* Fall through to supply start and stride. */
3551 case GFC_ISYM_LBOUND:
3552 case GFC_ISYM_UBOUND:
3553 case GFC_ISYM_LCOBOUND:
3554 case GFC_ISYM_UCOBOUND:
3555 case GFC_ISYM_THIS_IMAGE:
3556 break;
3557
3558 default:
3559 continue;
3560 }
3561
3562 case GFC_SS_CONSTRUCTOR:
3563 case GFC_SS_FUNCTION:
3564 for (n = 0; n < ss->dimen; n++)
3565 {
3566 int dim = ss->dim[n];
3567
3568 info->start[dim] = gfc_index_zero_node;
3569 info->end[dim] = gfc_index_zero_node;
3570 info->stride[dim] = gfc_index_one_node;
3571 }
3572 break;
3573
3574 default:
3575 break;
3576 }
3577 }
3578
3579 /* The rest is just runtime bound checking. */
3580 if (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS)
3581 {
3582 stmtblock_t block;
3583 tree lbound, ubound;
3584 tree end;
3585 tree size[GFC_MAX_DIMENSIONS];
3586 tree stride_pos, stride_neg, non_zerosized, tmp2, tmp3;
3587 gfc_array_info *info;
3588 char *msg;
3589 int dim;
3590
3591 gfc_start_block (&block);
3592
3593 for (n = 0; n < loop->dimen; n++)
3594 size[n] = NULL_TREE;
3595
3596 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
3597 {
3598 stmtblock_t inner;
3599 gfc_ss_info *ss_info;
3600 gfc_expr *expr;
3601 locus *expr_loc;
3602 const char *expr_name;
3603
3604 ss_info = ss->info;
3605 if (ss_info->type != GFC_SS_SECTION)
3606 continue;
3607
3608 /* Catch allocatable lhs in f2003. */
3609 if (gfc_option.flag_realloc_lhs && ss->is_alloc_lhs)
3610 continue;
3611
3612 expr = ss_info->expr;
3613 expr_loc = &expr->where;
3614 expr_name = expr->symtree->name;
3615
3616 gfc_start_block (&inner);
3617
3618 /* TODO: range checking for mapped dimensions. */
3619 info = &ss_info->data.array;
3620
3621 /* This code only checks ranges. Elemental and vector
3622 dimensions are checked later. */
3623 for (n = 0; n < loop->dimen; n++)
3624 {
3625 bool check_upper;
3626
3627 dim = ss->dim[n];
3628 if (info->ref->u.ar.dimen_type[dim] != DIMEN_RANGE)
3629 continue;
3630
3631 if (dim == info->ref->u.ar.dimen - 1
3632 && info->ref->u.ar.as->type == AS_ASSUMED_SIZE)
3633 check_upper = false;
3634 else
3635 check_upper = true;
3636
3637 /* Zero stride is not allowed. */
3638 tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
3639 info->stride[dim], gfc_index_zero_node);
3640 asprintf (&msg, "Zero stride is not allowed, for dimension %d "
3641 "of array '%s'", dim + 1, expr_name);
3642 gfc_trans_runtime_check (true, false, tmp, &inner,
3643 expr_loc, msg);
3644 free (msg);
3645
3646 desc = info->descriptor;
3647
3648 /* This is the run-time equivalent of resolve.c's
3649 check_dimension(). The logic is more readable there
3650 than it is here, with all the trees. */
3651 lbound = gfc_conv_array_lbound (desc, dim);
3652 end = info->end[dim];
3653 if (check_upper)
3654 ubound = gfc_conv_array_ubound (desc, dim);
3655 else
3656 ubound = NULL;
3657
3658 /* non_zerosized is true when the selected range is not
3659 empty. */
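	      /* E.g. a(5:1) selects no elements; the checks below are ANDed
		 with non_zerosized so they cannot trigger for such an empty
		 section.  */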
3660 stride_pos = fold_build2_loc (input_location, GT_EXPR,
3661 boolean_type_node, info->stride[dim],
3662 gfc_index_zero_node);
3663 tmp = fold_build2_loc (input_location, LE_EXPR, boolean_type_node,
3664 info->start[dim], end);
3665 stride_pos = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3666 boolean_type_node, stride_pos, tmp);
3667
3668 stride_neg = fold_build2_loc (input_location, LT_EXPR,
3669 boolean_type_node,
3670 info->stride[dim], gfc_index_zero_node);
3671 tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
3672 info->start[dim], end);
3673 stride_neg = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3674 boolean_type_node,
3675 stride_neg, tmp);
3676 non_zerosized = fold_build2_loc (input_location, TRUTH_OR_EXPR,
3677 boolean_type_node,
3678 stride_pos, stride_neg);
3679
3680 /* Check the start of the range against the lower and upper
3681 bounds of the array, if the range is not empty.
3682 If the upper bound is present, include both bounds in the
3683 error message. */
3684 if (check_upper)
3685 {
3686 tmp = fold_build2_loc (input_location, LT_EXPR,
3687 boolean_type_node,
3688 info->start[dim], lbound);
3689 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3690 boolean_type_node,
3691 non_zerosized, tmp);
3692 tmp2 = fold_build2_loc (input_location, GT_EXPR,
3693 boolean_type_node,
3694 info->start[dim], ubound);
3695 tmp2 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3696 boolean_type_node,
3697 non_zerosized, tmp2);
3698 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3699 "outside of expected range (%%ld:%%ld)",
3700 dim + 1, expr_name);
3701 gfc_trans_runtime_check (true, false, tmp, &inner,
3702 expr_loc, msg,
3703 fold_convert (long_integer_type_node, info->start[dim]),
3704 fold_convert (long_integer_type_node, lbound),
3705 fold_convert (long_integer_type_node, ubound));
3706 gfc_trans_runtime_check (true, false, tmp2, &inner,
3707 expr_loc, msg,
3708 fold_convert (long_integer_type_node, info->start[dim]),
3709 fold_convert (long_integer_type_node, lbound),
3710 fold_convert (long_integer_type_node, ubound));
3711 free (msg);
3712 }
3713 else
3714 {
3715 tmp = fold_build2_loc (input_location, LT_EXPR,
3716 boolean_type_node,
3717 info->start[dim], lbound);
3718 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3719 boolean_type_node, non_zerosized, tmp);
3720 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3721 "below lower bound of %%ld",
3722 dim + 1, expr_name);
3723 gfc_trans_runtime_check (true, false, tmp, &inner,
3724 expr_loc, msg,
3725 fold_convert (long_integer_type_node, info->start[dim]),
3726 fold_convert (long_integer_type_node, lbound));
3727 free (msg);
3728 }
3729
3730 /* Compute the last element of the range, which is not
3731 necessarily "end" (think 0:5:3, which doesn't contain 5)
3732 and check it against both lower and upper bounds. */
3733
3734 tmp = fold_build2_loc (input_location, MINUS_EXPR,
3735 gfc_array_index_type, end,
3736 info->start[dim]);
3737 tmp = fold_build2_loc (input_location, TRUNC_MOD_EXPR,
3738 gfc_array_index_type, tmp,
3739 info->stride[dim]);
3740 tmp = fold_build2_loc (input_location, MINUS_EXPR,
3741 gfc_array_index_type, end, tmp);
3742 tmp2 = fold_build2_loc (input_location, LT_EXPR,
3743 boolean_type_node, tmp, lbound);
3744 tmp2 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3745 boolean_type_node, non_zerosized, tmp2);
3746 if (check_upper)
3747 {
3748 tmp3 = fold_build2_loc (input_location, GT_EXPR,
3749 boolean_type_node, tmp, ubound);
3750 tmp3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
3751 boolean_type_node, non_zerosized, tmp3);
3752 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3753 "outside of expected range (%%ld:%%ld)",
3754 dim + 1, expr_name);
3755 gfc_trans_runtime_check (true, false, tmp2, &inner,
3756 expr_loc, msg,
3757 fold_convert (long_integer_type_node, tmp),
3758 fold_convert (long_integer_type_node, lbound),
3759 fold_convert (long_integer_type_node, ubound));
3760 gfc_trans_runtime_check (true, false, tmp3, &inner,
3761 expr_loc, msg,
3762 fold_convert (long_integer_type_node, tmp),
3763 fold_convert (long_integer_type_node, lbound),
3764 fold_convert (long_integer_type_node, ubound));
3765 free (msg);
3766 }
3767 else
3768 {
3769 asprintf (&msg, "Index '%%ld' of dimension %d of array '%s' "
3770 "below lower bound of %%ld",
3771 dim + 1, expr_name);
3772 gfc_trans_runtime_check (true, false, tmp2, &inner,
3773 expr_loc, msg,
3774 fold_convert (long_integer_type_node, tmp),
3775 fold_convert (long_integer_type_node, lbound));
3776 free (msg);
3777 }
3778
3779 /* Check the section sizes match. */
3780 tmp = fold_build2_loc (input_location, MINUS_EXPR,
3781 gfc_array_index_type, end,
3782 info->start[dim]);
3783 tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR,
3784 gfc_array_index_type, tmp,
3785 info->stride[dim]);
3786 tmp = fold_build2_loc (input_location, PLUS_EXPR,
3787 gfc_array_index_type,
3788 gfc_index_one_node, tmp);
3789 tmp = fold_build2_loc (input_location, MAX_EXPR,
3790 gfc_array_index_type, tmp,
3791 build_int_cst (gfc_array_index_type, 0));
3792 /* We remember the size of the first section, and check all the
3793 others against this. */
3794 if (size[n])
3795 {
3796 tmp3 = fold_build2_loc (input_location, NE_EXPR,
3797 boolean_type_node, tmp, size[n]);
3798 asprintf (&msg, "Array bound mismatch for dimension %d "
3799 "of array '%s' (%%ld/%%ld)",
3800 dim + 1, expr_name);
3801
3802 gfc_trans_runtime_check (true, false, tmp3, &inner,
3803 expr_loc, msg,
3804 fold_convert (long_integer_type_node, tmp),
3805 fold_convert (long_integer_type_node, size[n]));
3806
3807 free (msg);
3808 }
3809 else
3810 size[n] = gfc_evaluate_now (tmp, &inner);
3811 }
3812
3813 tmp = gfc_finish_block (&inner);
3814
3815 /* For optional arguments, only check bounds if the argument is
3816 present. */
3817 if (expr->symtree->n.sym->attr.optional
3818 || expr->symtree->n.sym->attr.not_always_present)
3819 tmp = build3_v (COND_EXPR,
3820 gfc_conv_expr_present (expr->symtree->n.sym),
3821 tmp, build_empty_stmt (input_location));
3822
3823 gfc_add_expr_to_block (&block, tmp);
3824
3825 }
3826
3827 tmp = gfc_finish_block (&block);
3828 gfc_add_expr_to_block (&loop->pre, tmp);
3829 }
3830
3831 for (loop = loop->nested; loop; loop = loop->next)
3832 gfc_conv_ss_startstride (loop);
3833 }
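
/* Editorial illustration of the bounds checks generated above (a sketch,
   not generated code): with run-time bounds checking enabled, a reference
   such as

       real :: a(8)
       a(1:10:3) = 0.0

   touches elements 1, 4, 7 and 10.  The "last element" computed above as
   end - mod (end - start, stride) is 10, which exceeds the upper bound 8,
   so the "outside of expected range" error is reported at run time.  */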
3834
3835 /* Return true if both symbols could refer to the same data object. Does
3836 not take account of aliasing due to equivalence statements. */
3837
3838 static int
3839 symbols_could_alias (gfc_symbol *lsym, gfc_symbol *rsym, bool lsym_pointer,
3840 bool lsym_target, bool rsym_pointer, bool rsym_target)
3841 {
3842 /* Aliasing isn't possible if the symbols have different base types. */
3843 if (gfc_compare_types (&lsym->ts, &rsym->ts) == 0)
3844 return 0;
3845
3846 /* Pointers can point to other pointers and target objects. */
3847
3848 if ((lsym_pointer && (rsym_pointer || rsym_target))
3849 || (rsym_pointer && (lsym_pointer || lsym_target)))
3850 return 1;
3851
3852 /* Special case: Argument association, cf. F90 12.4.1.6, F2003 12.4.1.7
3853 and F2008 12.5.2.13 items 3b and 4b. The pointer case (a) is already
3854 checked above. */
3855 if (lsym_target && rsym_target
3856 && ((lsym->attr.dummy && !lsym->attr.contiguous
3857 && (!lsym->attr.dimension || lsym->as->type == AS_ASSUMED_SHAPE))
3858 || (rsym->attr.dummy && !rsym->attr.contiguous
3859 && (!rsym->attr.dimension
3860 || rsym->as->type == AS_ASSUMED_SHAPE))))
3861 return 1;
3862
3863 return 0;
3864 }
3865
3866
3867 /* Return true if the two SS could be aliased, i.e. both point to the same data
3868 object. */
3869 /* TODO: resolve aliases based on frontend expressions. */
3870
3871 static int
3872 gfc_could_be_alias (gfc_ss * lss, gfc_ss * rss)
3873 {
3874 gfc_ref *lref;
3875 gfc_ref *rref;
3876 gfc_expr *lexpr, *rexpr;
3877 gfc_symbol *lsym;
3878 gfc_symbol *rsym;
3879 bool lsym_pointer, lsym_target, rsym_pointer, rsym_target;
3880
3881 lexpr = lss->info->expr;
3882 rexpr = rss->info->expr;
3883
3884 lsym = lexpr->symtree->n.sym;
3885 rsym = rexpr->symtree->n.sym;
3886
3887 lsym_pointer = lsym->attr.pointer;
3888 lsym_target = lsym->attr.target;
3889 rsym_pointer = rsym->attr.pointer;
3890 rsym_target = rsym->attr.target;
3891
3892 if (symbols_could_alias (lsym, rsym, lsym_pointer, lsym_target,
3893 rsym_pointer, rsym_target))
3894 return 1;
3895
3896 if (rsym->ts.type != BT_DERIVED && rsym->ts.type != BT_CLASS
3897 && lsym->ts.type != BT_DERIVED && lsym->ts.type != BT_CLASS)
3898 return 0;
3899
3900 /* For derived types we must check all the component types. We can ignore
3901 array references as these will have the same base type as the previous
3902 component ref. */
3903 for (lref = lexpr->ref; lref != lss->info->data.array.ref; lref = lref->next)
3904 {
3905 if (lref->type != REF_COMPONENT)
3906 continue;
3907
3908 lsym_pointer = lsym_pointer || lref->u.c.sym->attr.pointer;
3909 lsym_target = lsym_target || lref->u.c.sym->attr.target;
3910
3911 if (symbols_could_alias (lref->u.c.sym, rsym, lsym_pointer, lsym_target,
3912 rsym_pointer, rsym_target))
3913 return 1;
3914
3915 if ((lsym_pointer && (rsym_pointer || rsym_target))
3916 || (rsym_pointer && (lsym_pointer || lsym_target)))
3917 {
3918 if (gfc_compare_types (&lref->u.c.component->ts,
3919 &rsym->ts))
3920 return 1;
3921 }
3922
3923 for (rref = rexpr->ref; rref != rss->info->data.array.ref;
3924 rref = rref->next)
3925 {
3926 if (rref->type != REF_COMPONENT)
3927 continue;
3928
3929 rsym_pointer = rsym_pointer || rref->u.c.sym->attr.pointer;
3930 rsym_target = rsym_target || rref->u.c.sym->attr.target;
3931
3932 if (symbols_could_alias (lref->u.c.sym, rref->u.c.sym,
3933 lsym_pointer, lsym_target,
3934 rsym_pointer, rsym_target))
3935 return 1;
3936
3937 if ((lsym_pointer && (rsym_pointer || rsym_target))
3938 || (rsym_pointer && (lsym_pointer || lsym_target)))
3939 {
3940 if (gfc_compare_types (&lref->u.c.component->ts,
3941 &rref->u.c.sym->ts))
3942 return 1;
3943 if (gfc_compare_types (&lref->u.c.sym->ts,
3944 &rref->u.c.component->ts))
3945 return 1;
3946 if (gfc_compare_types (&lref->u.c.component->ts,
3947 &rref->u.c.component->ts))
3948 return 1;
3949 }
3950 }
3951 }
3952
3953 lsym_pointer = lsym->attr.pointer;
3954 lsym_target = lsym->attr.target;
3957
3958 for (rref = rexpr->ref; rref != rss->info->data.array.ref; rref = rref->next)
3959 {
3960 if (rref->type != REF_COMPONENT)
3961 break;
3962
3963 rsym_pointer = rsym_pointer || rref->u.c.sym->attr.pointer;
3964 rsym_target = rsym_target || rref->u.c.sym->attr.target;
3965
3966 if (symbols_could_alias (rref->u.c.sym, lsym,
3967 lsym_pointer, lsym_target,
3968 rsym_pointer, rsym_target))
3969 return 1;
3970
3971 if ((lsym_pointer && (rsym_pointer || rsym_target))
3972 || (rsym_pointer && (lsym_pointer || lsym_target)))
3973 {
3974 if (gfc_compare_types (&lsym->ts, &rref->u.c.component->ts))
3975 return 1;
3976 }
3977 }
3978
3979 return 0;
3980 }
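
/* Illustrative example (assumed, not taken from the sources): the
   pointer/target logic above is what makes the following assignment
   suspect even though the two sides name different symbols:

       real, target  :: a(10)
       real, pointer :: p(:)
       p => a(10:1:-1)
       p = a            ! p and a can designate the same storage

   gfc_could_be_alias returning 1 here leads to a temporary being used.  */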
3981
3982
3983 /* Resolve array data dependencies. Creates a temporary if required. */
3984 /* TODO: Calc dependencies with gfc_expr rather than gfc_ss, and move to
3985 dependency.c. */
3986
3987 void
3988 gfc_conv_resolve_dependencies (gfc_loopinfo * loop, gfc_ss * dest,
3989 gfc_ss * rss)
3990 {
3991 gfc_ss *ss;
3992 gfc_ref *lref;
3993 gfc_ref *rref;
3994 gfc_expr *dest_expr;
3995 gfc_expr *ss_expr;
3996 int nDepend = 0;
3997 int i, j;
3998
3999 loop->temp_ss = NULL;
4000 dest_expr = dest->info->expr;
4001
4002 for (ss = rss; ss != gfc_ss_terminator; ss = ss->next)
4003 {
4004 if (ss->info->type != GFC_SS_SECTION)
4005 continue;
4006
4007 ss_expr = ss->info->expr;
4008
4009 if (dest_expr->symtree->n.sym != ss_expr->symtree->n.sym)
4010 {
4011 if (gfc_could_be_alias (dest, ss)
4012 || gfc_are_equivalenced_arrays (dest_expr, ss_expr))
4013 {
4014 nDepend = 1;
4015 break;
4016 }
4017 }
4018 else
4019 {
4020 lref = dest_expr->ref;
4021 rref = ss_expr->ref;
4022
4023 nDepend = gfc_dep_resolver (lref, rref, &loop->reverse[0]);
4024
4025 if (nDepend == 1)
4026 break;
4027
4028 for (i = 0; i < dest->dimen; i++)
4029 for (j = 0; j < ss->dimen; j++)
4030 if (i != j
4031 && dest->dim[i] == ss->dim[j])
4032 {
4033 /* If we don't access array elements in the same order,
4034 there is a dependency. */
4035 nDepend = 1;
4036 goto temporary;
4037 }
4038 #if 0
4039 /* TODO : loop shifting. */
4040 if (nDepend == 1)
4041 {
4042 /* Mark the dimensions for LOOP SHIFTING */
4043 for (n = 0; n < loop->dimen; n++)
4044 {
4045 int dim = dest->data.info.dim[n];
4046
4047 if (lref->u.ar.dimen_type[dim] == DIMEN_VECTOR)
4048 depends[n] = 2;
4049 else if (! gfc_is_same_range (&lref->u.ar,
4050 &rref->u.ar, dim, 0))
4051 depends[n] = 1;
4052 }
4053
4054 /* Put all the dimensions with dependencies in the
4055 innermost loops. */
4056 dim = 0;
4057 for (n = 0; n < loop->dimen; n++)
4058 {
4059 gcc_assert (loop->order[n] == n);
4060 if (depends[n])
4061 loop->order[dim++] = n;
4062 }
4063 for (n = 0; n < loop->dimen; n++)
4064 {
4065 if (! depends[n])
4066 loop->order[dim++] = n;
4067 }
4068
4069 gcc_assert (dim == loop->dimen);
4070 break;
4071 }
4072 #endif
4073 }
4074 }
4075
4076 temporary:
4077
4078 if (nDepend == 1)
4079 {
4080 tree base_type = gfc_typenode_for_spec (&dest_expr->ts);
4081 if (GFC_ARRAY_TYPE_P (base_type)
4082 || GFC_DESCRIPTOR_TYPE_P (base_type))
4083 base_type = gfc_get_element_type (base_type);
4084 loop->temp_ss = gfc_get_temp_ss (base_type, dest->info->string_length,
4085 loop->dimen);
4086 gfc_add_ss_to_loop (loop, loop->temp_ss);
4087 }
4088 else
4089 loop->temp_ss = NULL;
4090 }
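
/* Editorial examples for the dependency cases handled above (assumed):

       x(2:10) = x(1:9)       ! same symbol: gfc_dep_resolver decides; the
                              ! overlap may be handled by reversing the loop
                              ! or by creating a temporary
       p = t                  ! different symbols: gfc_could_be_alias and the
                              ! equivalence check decide; a pointer/target
                              ! pair still forces a temporary

   Only when nDepend ends up as 1 is loop->temp_ss set up.  */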
4091
4092
4093 /* Browse through each array's information from the scalarizer and set the loop
4094 bounds according to the "best" one (per dimension), i.e. the one which
4095 provides the most information (constant bounds, shape, etc). */
4096
4097 static void
4098 set_loop_bounds (gfc_loopinfo *loop)
4099 {
4100 int n, dim, spec_dim;
4101 gfc_array_info *info;
4102 gfc_array_info *specinfo;
4103 gfc_ss *ss;
4104 tree tmp;
4105 gfc_ss **loopspec;
4106 bool dynamic[GFC_MAX_DIMENSIONS];
4107 mpz_t *cshape;
4108 mpz_t i;
4109
4110 loopspec = loop->specloop;
4111
4112 mpz_init (i);
4113 for (n = 0; n < loop->dimen; n++)
4114 {
4115 loopspec[n] = NULL;
4116 dynamic[n] = false;
4117 /* We use one SS term, and use that to determine the bounds of the
4118 loop for this dimension. We try to pick the simplest term. */
4119 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
4120 {
4121 gfc_ss_type ss_type;
4122
4123 ss_type = ss->info->type;
4124 if (ss_type == GFC_SS_SCALAR
4125 || ss_type == GFC_SS_TEMP
4126 || ss_type == GFC_SS_REFERENCE)
4127 continue;
4128
4129 info = &ss->info->data.array;
4130 dim = ss->dim[n];
4131
4132 if (loopspec[n] != NULL)
4133 {
4134 specinfo = &loopspec[n]->info->data.array;
4135 spec_dim = loopspec[n]->dim[n];
4136 }
4137 else
4138 {
4139 /* Silence uninitialized warnings. */
4140 specinfo = NULL;
4141 spec_dim = 0;
4142 }
4143
4144 if (info->shape)
4145 {
4146 gcc_assert (info->shape[dim]);
4147 /* The frontend has worked out the size for us. */
4148 if (!loopspec[n]
4149 || !specinfo->shape
4150 || !integer_zerop (specinfo->start[spec_dim]))
4151 /* Prefer zero-based descriptors if possible. */
4152 loopspec[n] = ss;
4153 continue;
4154 }
4155
4156 if (ss_type == GFC_SS_CONSTRUCTOR)
4157 {
4158 gfc_constructor_base base;
4159 /* An unknown size constructor will always be rank one.
4160 Higher rank constructors will either have known shape,
4161 or still be wrapped in a call to reshape. */
4162 gcc_assert (loop->dimen == 1);
4163
4164 /* Always prefer to use the constructor bounds if the size
4165 can be determined at compile time. Prefer not to otherwise,
4166 since the general case involves realloc, and it's better to
4167 avoid that overhead if possible. */
4168 base = ss->info->expr->value.constructor;
4169 dynamic[n] = gfc_get_array_constructor_size (&i, base);
4170 if (!dynamic[n] || !loopspec[n])
4171 loopspec[n] = ss;
4172 continue;
4173 }
4174
4175 /* TODO: Pick the best bound if we have a choice between a
4176 function and something else. */
4177 if (ss_type == GFC_SS_FUNCTION)
4178 {
4179 loopspec[n] = ss;
4180 continue;
4181 }
4182
4183 /* Avoid using an allocatable lhs in an assignment, since
4184 there might be a reallocation coming. */
4185 if (loopspec[n] && ss->is_alloc_lhs)
4186 continue;
4187
4188 if (ss_type != GFC_SS_SECTION)
4189 continue;
4190
4191 if (!loopspec[n])
4192 loopspec[n] = ss;
4193 /* Criteria for choosing a loop specifier (most important first):
4194 doesn't need realloc
4195 stride of one
4196 known stride
4197 known lower bound
4198 known upper bound
4199 */
4200 else if ((loopspec[n]->info->type == GFC_SS_CONSTRUCTOR && dynamic[n])
4201 || n >= loop->dimen)
4202 loopspec[n] = ss;
4203 else if (integer_onep (info->stride[dim])
4204 && !integer_onep (specinfo->stride[spec_dim]))
4205 loopspec[n] = ss;
4206 else if (INTEGER_CST_P (info->stride[dim])
4207 && !INTEGER_CST_P (specinfo->stride[spec_dim]))
4208 loopspec[n] = ss;
4209 else if (INTEGER_CST_P (info->start[dim])
4210 && !INTEGER_CST_P (specinfo->start[spec_dim]))
4211 loopspec[n] = ss;
4212 /* We don't work out the upper bound.
4213 else if (INTEGER_CST_P (info->finish[n])
4214 && ! INTEGER_CST_P (specinfo->finish[n]))
4215 loopspec[n] = ss; */
4216 }
4217
4218 /* We should have found the scalarization loop specifier. If not,
4219 that's bad news. */
4220 gcc_assert (loopspec[n]);
4221
4222 info = &loopspec[n]->info->data.array;
4223 dim = loopspec[n]->dim[n];
4224
4225 /* Set the extents of this range. */
4226 cshape = info->shape;
4227 if (cshape && INTEGER_CST_P (info->start[dim])
4228 && INTEGER_CST_P (info->stride[dim]))
4229 {
4230 loop->from[n] = info->start[dim];
4231 mpz_set (i, cshape[get_array_ref_dim_for_loop_dim (loopspec[n], n)]);
4232 mpz_sub_ui (i, i, 1);
4233 /* To = from + (size - 1) * stride. */
4234 tmp = gfc_conv_mpz_to_tree (i, gfc_index_integer_kind);
4235 if (!integer_onep (info->stride[dim]))
4236 tmp = fold_build2_loc (input_location, MULT_EXPR,
4237 gfc_array_index_type, tmp,
4238 info->stride[dim]);
4239 loop->to[n] = fold_build2_loc (input_location, PLUS_EXPR,
4240 gfc_array_index_type,
4241 loop->from[n], tmp);
4242 }
4243 else
4244 {
4245 loop->from[n] = info->start[dim];
4246 switch (loopspec[n]->info->type)
4247 {
4248 case GFC_SS_CONSTRUCTOR:
4249 /* The upper bound is calculated when we expand the
4250 constructor. */
4251 gcc_assert (loop->to[n] == NULL_TREE);
4252 break;
4253
4254 case GFC_SS_SECTION:
4255 /* Use the end expression if it exists and is not constant,
4256 so that it is only evaluated once. */
4257 loop->to[n] = info->end[dim];
4258 break;
4259
4260 case GFC_SS_FUNCTION:
4261 /* The loop bound will be set when we generate the call. */
4262 gcc_assert (loop->to[n] == NULL_TREE);
4263 break;
4264
4265 default:
4266 gcc_unreachable ();
4267 }
4268 }
4269
4270 /* Transform everything so we have a simple incrementing variable. */
4271 if (n < loop->dimen && integer_onep (info->stride[dim]))
4272 info->delta[dim] = gfc_index_zero_node;
4273 else if (n < loop->dimen)
4274 {
4275 /* Set the delta for this section. */
4276 info->delta[dim] = gfc_evaluate_now (loop->from[n], &loop->pre);
4277 /* Number of iterations is (end - start + step) / step.
4278 With start = 0, this simplifies to
4279 last = end / step;
4280 for (i = 0; i<=last; i++){...}; */
4281 tmp = fold_build2_loc (input_location, MINUS_EXPR,
4282 gfc_array_index_type, loop->to[n],
4283 loop->from[n]);
4284 tmp = fold_build2_loc (input_location, FLOOR_DIV_EXPR,
4285 gfc_array_index_type, tmp, info->stride[dim]);
4286 tmp = fold_build2_loc (input_location, MAX_EXPR, gfc_array_index_type,
4287 tmp, build_int_cst (gfc_array_index_type, -1));
4288 loop->to[n] = gfc_evaluate_now (tmp, &loop->pre);
4289 /* Make the loop variable start at 0. */
4290 loop->from[n] = gfc_index_zero_node;
4291 }
4292 }
4293 mpz_clear (i);
4294
4295 for (loop = loop->nested; loop; loop = loop->next)
4296 set_loop_bounds (loop);
4297 }
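
/* Worked instance of the normalization above (illustrative only): for the
   section a(2:10:3) the chosen term gives from = 2, to = 10, stride = 3.
   The rewrite stores delta = 2 and then sets

       from = 0,   to = (10 - 2) / 3 = 2   (floor division)

   so the loop runs over 0, 1, 2 and the original indices 2, 5, 8 are
   recovered later as loop_var * stride + delta.  */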
4298
4299
4300 static void set_delta (gfc_loopinfo *loop);
4301
4302
4303 /* Initialize the scalarization loop. Creates the loop variables. Determines
4304 the range of the loop variables. Creates a temporary if required.
4305 Also generates code for scalar expressions which have been
4306 moved outside the loop. */
4307
4308 void
4309 gfc_conv_loop_setup (gfc_loopinfo * loop, locus * where)
4310 {
4311 gfc_ss *tmp_ss;
4312 tree tmp;
4313
4314 set_loop_bounds (loop);
4315
4316 /* Add all the scalar code that can be taken out of the loops.
4317 This may include calculating the loop bounds, so do it before
4318 allocating the temporary. */
4319 gfc_add_loop_ss_code (loop, loop->ss, false, where);
4320
4321 tmp_ss = loop->temp_ss;
4322 /* If we want a temporary then create it. */
4323 if (tmp_ss != NULL)
4324 {
4325 gfc_ss_info *tmp_ss_info;
4326
4327 tmp_ss_info = tmp_ss->info;
4328 gcc_assert (tmp_ss_info->type == GFC_SS_TEMP);
4329 gcc_assert (loop->parent == NULL);
4330
4331 /* Make absolutely sure that this is a complete type. */
4332 if (tmp_ss_info->string_length)
4333 tmp_ss_info->data.temp.type
4334 = gfc_get_character_type_len_for_eltype
4335 (TREE_TYPE (tmp_ss_info->data.temp.type),
4336 tmp_ss_info->string_length);
4337
4338 tmp = tmp_ss_info->data.temp.type;
4339 memset (&tmp_ss_info->data.array, 0, sizeof (gfc_array_info));
4340 tmp_ss_info->type = GFC_SS_SECTION;
4341
4342 gcc_assert (tmp_ss->dimen != 0);
4343
4344 gfc_trans_create_temp_array (&loop->pre, &loop->post, tmp_ss, tmp,
4345 NULL_TREE, false, true, false, where);
4346 }
4347
4348 /* For array parameters we don't have loop variables, so don't calculate the
4349 translations. */
4350 if (loop->array_parameter)
4351 return;
4352
4353 set_delta (loop);
4354 }
4355
4356
4357 /* Calculates how to transform from loop variables to array indices for each
4358 array: once loop bounds are chosen, sets the difference (DELTA field) between
4359 loop bounds and array reference bounds, for each array info. */
4360
4361 static void
4362 set_delta (gfc_loopinfo *loop)
4363 {
4364 gfc_ss *ss, **loopspec;
4365 gfc_array_info *info;
4366 tree tmp;
4367 int n, dim;
4368
4369 loopspec = loop->specloop;
4370
4371 /* Calculate the translation from loop variables to array indices. */
4372 for (ss = loop->ss; ss != gfc_ss_terminator; ss = ss->loop_chain)
4373 {
4374 gfc_ss_type ss_type;
4375
4376 ss_type = ss->info->type;
4377 if (ss_type != GFC_SS_SECTION
4378 && ss_type != GFC_SS_COMPONENT
4379 && ss_type != GFC_SS_CONSTRUCTOR)
4380 continue;
4381
4382 info = &ss->info->data.array;
4383
4384 for (n = 0; n < ss->dimen; n++)
4385 {
4386 /* If we are specifying the range, the delta is already set. */
4387 if (loopspec[n] != ss)
4388 {
4389 dim = ss->dim[n];
4390
4391 /* Calculate the offset relative to the loop variable.
4392 First multiply by the stride. */
4393 tmp = loop->from[n];
4394 if (!integer_onep (info->stride[dim]))
4395 tmp = fold_build2_loc (input_location, MULT_EXPR,
4396 gfc_array_index_type,
4397 tmp, info->stride[dim]);
4398
4399 /* Then subtract this from our starting value. */
4400 tmp = fold_build2_loc (input_location, MINUS_EXPR,
4401 gfc_array_index_type,
4402 info->start[dim], tmp);
4403
4404 info->delta[dim] = gfc_evaluate_now (tmp, &loop->pre);
4405 }
4406 }
4407 }
4408
4409 for (loop = loop->nested; loop; loop = loop->next)
4410 set_delta (loop);
4411 }
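
/* Illustrative arithmetic for the delta set above (not generated code): if
   the loop bounds come from another term and were normalized to run 0..2
   with from = 0, then a conforming section such as b(5:9:2) gets

       delta = start - from * stride = 5 - 0 * 2 = 5

   and its elements are addressed as loop_var * stride + delta = 5, 7, 9.  */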
4412
4413
4414 /* Calculate the size of a given array dimension from the bounds. This
4415 is simply (ubound - lbound + 1) if this expression is positive
4416 or 0 if it is negative (pick either one if it is zero). Optionally
4417 (if or_expr is present) OR the (extent <= 0) condition into it. */
4418
4419 tree
4420 gfc_conv_array_extent_dim (tree lbound, tree ubound, tree* or_expr)
4421 {
4422 tree res;
4423 tree cond;
4424
4425 /* Calculate (ubound - lbound + 1). */
4426 res = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
4427 ubound, lbound);
4428 res = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type, res,
4429 gfc_index_one_node);
4430
4431 /* Check whether the size for this dimension is negative. */
4432 cond = fold_build2_loc (input_location, LE_EXPR, boolean_type_node, res,
4433 gfc_index_zero_node);
4434 res = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type, cond,
4435 gfc_index_zero_node, res);
4436
4437 /* Build OR expression. */
4438 if (or_expr)
4439 *or_expr = fold_build2_loc (input_location, TRUTH_OR_EXPR,
4440 boolean_type_node, *or_expr, cond);
4441
4442 return res;
4443 }
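
/* Worked examples for the extent formula above (editorial illustration):
   lbound = -3, ubound = 5 gives 5 - (-3) + 1 = 9 elements, while
   lbound = 5, ubound = 2 gives 2 - 5 + 1 = -2, which is clamped to 0 and
   also makes the or_expr condition true.  */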
4444
4445
4446 /* For an array descriptor, get the total number of elements. This is just
4447 the product of the extents along from_dim to to_dim. */
4448
4449 static tree
4450 gfc_conv_descriptor_size_1 (tree desc, int from_dim, int to_dim)
4451 {
4452 tree res;
4453 int dim;
4454
4455 res = gfc_index_one_node;
4456
4457 for (dim = from_dim; dim < to_dim; ++dim)
4458 {
4459 tree lbound;
4460 tree ubound;
4461 tree extent;
4462
4463 lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[dim]);
4464 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[dim]);
4465
4466 extent = gfc_conv_array_extent_dim (lbound, ubound, NULL);
4467 res = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
4468 res, extent);
4469 }
4470
4471 return res;
4472 }
4473
4474
4475 /* Full size of an array. */
4476
4477 tree
4478 gfc_conv_descriptor_size (tree desc, int rank)
4479 {
4480 return gfc_conv_descriptor_size_1 (desc, 0, rank);
4481 }
4482
4483
4484 /* Size of a coarray for all dimensions but the last. */
4485
4486 tree
4487 gfc_conv_descriptor_cosize (tree desc, int rank, int corank)
4488 {
4489 return gfc_conv_descriptor_size_1 (desc, rank, rank + corank - 1);
4490 }
4491
4492
4493 /* Fills in an array descriptor, and returns the size of the array.
4494 The size will be a simple_val, i.e. a variable or a constant. Also
4495 calculates the offset of the base. The pointer argument overflow,
4496 which should be of integer type, will increase in value if overflow
4497 occurs during the size calculation. Returns the size of the array.
4498 {
4499 stride = 1;
4500 offset = 0;
4501 for (n = 0; n < rank; n++)
4502 {
4503 a.lbound[n] = specified_lower_bound;
4504 offset = offset + a.lbound[n] * stride;
4505 size = 1 - lbound;
4506 a.ubound[n] = specified_upper_bound;
4507 a.stride[n] = stride;
4508 size = size >= 0 ? ubound + size : 0; //size = ubound + 1 - lbound
4509 overflow += size == 0 ? 0: (MAX/size < stride ? 1: 0);
4510 stride = stride * size;
4511 }
4512 for (n = rank; n < rank+corank; n++)
4513 (Set lcobound/ucobound as above.)
4514 element_size = sizeof (array element);
4515 if (!rank)
4516 return element_size
4517 stride = (size_t) stride;
4518 overflow += element_size == 0 ? 0: (MAX/element_size < stride ? 1: 0);
4519 stride = stride * element_size;
4520 return (stride);
4521 } */
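
/* Worked instance of the pseudo code above (illustrative numbers only):
   for ALLOCATE (a(2:4, 0:9)), rank 2 and no corank,

       n = 0:  lbound = 2, ubound = 4,  a.stride[0] = 1, size = 3,  offset = -2
       n = 1:  lbound = 0, ubound = 9,  a.stride[1] = 3, size = 10, offset = -2

   leaving stride = 30 elements, so the returned size is 30 * element_size
   bytes, and offset = -(2*1 + 0*3) = -2 maps a(2,0) to element 0.  */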
4522 /*GCC ARRAYS*/
4523
4524 static tree
4525 gfc_array_init_size (tree descriptor, int rank, int corank, tree * poffset,
4526 gfc_expr ** lower, gfc_expr ** upper, stmtblock_t * pblock,
4527 stmtblock_t * descriptor_block, tree * overflow)
4528 {
4529 tree type;
4530 tree tmp;
4531 tree size;
4532 tree offset;
4533 tree stride;
4534 tree element_size;
4535 tree or_expr;
4536 tree thencase;
4537 tree elsecase;
4538 tree cond;
4539 tree var;
4540 stmtblock_t thenblock;
4541 stmtblock_t elseblock;
4542 gfc_expr *ubound;
4543 gfc_se se;
4544 int n;
4545
4546 type = TREE_TYPE (descriptor);
4547
4548 stride = gfc_index_one_node;
4549 offset = gfc_index_zero_node;
4550
4551 /* Set the dtype. */
4552 tmp = gfc_conv_descriptor_dtype (descriptor);
4553 gfc_add_modify (descriptor_block, tmp, gfc_get_dtype (TREE_TYPE (descriptor)));
4554
4555 or_expr = boolean_false_node;
4556
4557 for (n = 0; n < rank; n++)
4558 {
4559 tree conv_lbound;
4560 tree conv_ubound;
4561
4562 /* We have 3 possibilities for determining the size of the array:
4563 lower == NULL => lbound = 1, ubound = upper[n]
4564 upper[n] = NULL => lbound = 1, ubound = lower[n]
4565 upper[n] != NULL => lbound = lower[n], ubound = upper[n] */
4566 ubound = upper[n];
4567
4568 /* Set lower bound. */
4569 gfc_init_se (&se, NULL);
4570 if (lower == NULL)
4571 se.expr = gfc_index_one_node;
4572 else
4573 {
4574 gcc_assert (lower[n]);
4575 if (ubound)
4576 {
4577 gfc_conv_expr_type (&se, lower[n], gfc_array_index_type);
4578 gfc_add_block_to_block (pblock, &se.pre);
4579 }
4580 else
4581 {
4582 se.expr = gfc_index_one_node;
4583 ubound = lower[n];
4584 }
4585 }
4586 gfc_conv_descriptor_lbound_set (descriptor_block, descriptor,
4587 gfc_rank_cst[n], se.expr);
4588 conv_lbound = se.expr;
4589
4590 /* Work out the offset for this component. */
4591 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
4592 se.expr, stride);
4593 offset = fold_build2_loc (input_location, MINUS_EXPR,
4594 gfc_array_index_type, offset, tmp);
4595
4596 /* Set upper bound. */
4597 gfc_init_se (&se, NULL);
4598 gcc_assert (ubound);
4599 gfc_conv_expr_type (&se, ubound, gfc_array_index_type);
4600 gfc_add_block_to_block (pblock, &se.pre);
4601
4602 gfc_conv_descriptor_ubound_set (descriptor_block, descriptor,
4603 gfc_rank_cst[n], se.expr);
4604 conv_ubound = se.expr;
4605
4606 /* Store the stride. */
4607 gfc_conv_descriptor_stride_set (descriptor_block, descriptor,
4608 gfc_rank_cst[n], stride);
4609
4610 /* Calculate size and check whether extent is negative. */
4611 size = gfc_conv_array_extent_dim (conv_lbound, conv_ubound, &or_expr);
4612 size = gfc_evaluate_now (size, pblock);
4613
4614 /* Check whether multiplying the stride by the number of
4615 elements in this dimension would overflow. We must also check
4616 whether the current dimension has zero size in order to avoid
4617 division by zero.
4618 */
4619 tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
4620 gfc_array_index_type,
4621 fold_convert (gfc_array_index_type,
4622 TYPE_MAX_VALUE (gfc_array_index_type)),
4623 size);
4624 cond = gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
4625 boolean_type_node, tmp, stride));
4626 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
4627 integer_one_node, integer_zero_node);
4628 cond = gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
4629 boolean_type_node, size,
4630 gfc_index_zero_node));
4631 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
4632 integer_zero_node, tmp);
4633 tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
4634 *overflow, tmp);
4635 *overflow = gfc_evaluate_now (tmp, pblock);
4636
4637 /* Multiply the stride by the number of elements in this dimension. */
4638 stride = fold_build2_loc (input_location, MULT_EXPR,
4639 gfc_array_index_type, stride, size);
4640 stride = gfc_evaluate_now (stride, pblock);
4641 }
4642
4643 for (n = rank; n < rank + corank; n++)
4644 {
4645 ubound = upper[n];
4646
4647 /* Set lower bound. */
4648 gfc_init_se (&se, NULL);
4649 if (lower == NULL || lower[n] == NULL)
4650 {
4651 gcc_assert (n == rank + corank - 1);
4652 se.expr = gfc_index_one_node;
4653 }
4654 else
4655 {
4656 if (ubound || n == rank + corank - 1)
4657 {
4658 gfc_conv_expr_type (&se, lower[n], gfc_array_index_type);
4659 gfc_add_block_to_block (pblock, &se.pre);
4660 }
4661 else
4662 {
4663 se.expr = gfc_index_one_node;
4664 ubound = lower[n];
4665 }
4666 }
4667 gfc_conv_descriptor_lbound_set (descriptor_block, descriptor,
4668 gfc_rank_cst[n], se.expr);
4669
4670 if (n < rank + corank - 1)
4671 {
4672 gfc_init_se (&se, NULL);
4673 gcc_assert (ubound);
4674 gfc_conv_expr_type (&se, ubound, gfc_array_index_type);
4675 gfc_add_block_to_block (pblock, &se.pre);
4676 gfc_conv_descriptor_ubound_set (descriptor_block, descriptor,
4677 gfc_rank_cst[n], se.expr);
4678 }
4679 }
4680
4681 /* The stride is the number of elements in the array, so multiply by the
4682 size of an element to get the total size. */
4683 tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
4684 /* Convert to size_t. */
4685 element_size = fold_convert (size_type_node, tmp);
4686
4687 if (rank == 0)
4688 return element_size;
4689
4690 stride = fold_convert (size_type_node, stride);
4691
4692 /* First check for overflow. Since an array of type character can
4693 have zero element_size, we must check for that before
4694 dividing. */
4695 tmp = fold_build2_loc (input_location, TRUNC_DIV_EXPR,
4696 size_type_node,
4697 TYPE_MAX_VALUE (size_type_node), element_size);
4698 cond = gfc_unlikely (fold_build2_loc (input_location, LT_EXPR,
4699 boolean_type_node, tmp, stride));
4700 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
4701 integer_one_node, integer_zero_node);
4702 cond = gfc_unlikely (fold_build2_loc (input_location, EQ_EXPR,
4703 boolean_type_node, element_size,
4704 build_int_cst (size_type_node, 0)));
4705 tmp = fold_build3_loc (input_location, COND_EXPR, integer_type_node, cond,
4706 integer_zero_node, tmp);
4707 tmp = fold_build2_loc (input_location, PLUS_EXPR, integer_type_node,
4708 *overflow, tmp);
4709 *overflow = gfc_evaluate_now (tmp, pblock);
4710
4711 size = fold_build2_loc (input_location, MULT_EXPR, size_type_node,
4712 stride, element_size);
4713
4714 if (poffset != NULL)
4715 {
4716 offset = gfc_evaluate_now (offset, pblock);
4717 *poffset = offset;
4718 }
4719
4720 if (integer_zerop (or_expr))
4721 return size;
4722 if (integer_onep (or_expr))
4723 return build_int_cst (size_type_node, 0);
4724
4725 var = gfc_create_var (TREE_TYPE (size), "size");
4726 gfc_start_block (&thenblock);
4727 gfc_add_modify (&thenblock, var, build_int_cst (size_type_node, 0));
4728 thencase = gfc_finish_block (&thenblock);
4729
4730 gfc_start_block (&elseblock);
4731 gfc_add_modify (&elseblock, var, size);
4732 elsecase = gfc_finish_block (&elseblock);
4733
4734 tmp = gfc_evaluate_now (or_expr, pblock);
4735 tmp = build3_v (COND_EXPR, tmp, thencase, elsecase);
4736 gfc_add_expr_to_block (pblock, tmp);
4737
4738 return var;
4739 }
4740
4741
4742 /* Initializes the descriptor and generates the code that allocates the data.
4743 Does the work for an ALLOCATE statement. */
4744 /*GCC ARRAYS*/
4745
4746 bool
4747 gfc_array_allocate (gfc_se * se, gfc_expr * expr, tree status, tree errmsg,
4748 tree errlen)
4749 {
4750 tree tmp;
4751 tree pointer;
4752 tree offset = NULL_TREE;
4753 tree token = NULL_TREE;
4754 tree size;
4755 tree msg;
4756 tree error = NULL_TREE;
4757 tree overflow; /* Boolean storing whether size calculation overflows. */
4758 tree var_overflow = NULL_TREE;
4759 tree cond;
4760 tree set_descriptor;
4761 stmtblock_t set_descriptor_block;
4762 stmtblock_t elseblock;
4763 gfc_expr **lower;
4764 gfc_expr **upper;
4765 gfc_ref *ref, *prev_ref = NULL;
4766 bool allocatable, coarray, dimension;
4767
4768 ref = expr->ref;
4769
4770 /* Find the last reference in the chain. */
4771 while (ref && ref->next != NULL)
4772 {
4773 gcc_assert (ref->type != REF_ARRAY || ref->u.ar.type == AR_ELEMENT
4774 || (ref->u.ar.dimen == 0 && ref->u.ar.codimen > 0));
4775 prev_ref = ref;
4776 ref = ref->next;
4777 }
4778
4779 if (ref == NULL || ref->type != REF_ARRAY)
4780 return false;
4781
4782 if (!prev_ref)
4783 {
4784 allocatable = expr->symtree->n.sym->attr.allocatable;
4785 coarray = expr->symtree->n.sym->attr.codimension;
4786 dimension = expr->symtree->n.sym->attr.dimension;
4787 }
4788 else
4789 {
4790 allocatable = prev_ref->u.c.component->attr.allocatable;
4791 coarray = prev_ref->u.c.component->attr.codimension;
4792 dimension = prev_ref->u.c.component->attr.dimension;
4793 }
4794
4795 if (!dimension)
4796 gcc_assert (coarray);
4797
4798 /* Figure out the size of the array. */
4799 switch (ref->u.ar.type)
4800 {
4801 case AR_ELEMENT:
4802 if (!coarray)
4803 {
4804 lower = NULL;
4805 upper = ref->u.ar.start;
4806 break;
4807 }
4808 /* Fall through. */
4809
4810 case AR_SECTION:
4811 lower = ref->u.ar.start;
4812 upper = ref->u.ar.end;
4813 break;
4814
4815 case AR_FULL:
4816 gcc_assert (ref->u.ar.as->type == AS_EXPLICIT);
4817
4818 lower = ref->u.ar.as->lower;
4819 upper = ref->u.ar.as->upper;
4820 break;
4821
4822 default:
4823 gcc_unreachable ();
4824 break;
4825 }
4826
4827 overflow = integer_zero_node;
4828
4829 gfc_init_block (&set_descriptor_block);
4830 size = gfc_array_init_size (se->expr, ref->u.ar.as->rank,
4831 ref->u.ar.as->corank, &offset, lower, upper,
4832 &se->pre, &set_descriptor_block, &overflow);
4833
4834 if (dimension)
4835 {
4836
4837 var_overflow = gfc_create_var (integer_type_node, "overflow");
4838 gfc_add_modify (&se->pre, var_overflow, overflow);
4839
4840 /* Generate the block of code handling overflow. */
4841 msg = gfc_build_addr_expr (pchar_type_node,
4842 gfc_build_localized_cstring_const
4843 ("Integer overflow when calculating the amount of "
4844 "memory to allocate"));
4845 error = build_call_expr_loc (input_location, gfor_fndecl_runtime_error,
4846 1, msg);
4847 }
4848
4849 if (status != NULL_TREE)
4850 {
4851 tree status_type = TREE_TYPE (status);
4852 stmtblock_t set_status_block;
4853
4854 gfc_start_block (&set_status_block);
4855 gfc_add_modify (&set_status_block, status,
4856 build_int_cst (status_type, LIBERROR_ALLOCATION));
4857 error = gfc_finish_block (&set_status_block);
4858 }
4859
4860 gfc_start_block (&elseblock);
4861
4862 /* Allocate memory to store the data. */
4863 pointer = gfc_conv_descriptor_data_get (se->expr);
4864 STRIP_NOPS (pointer);
4865
4866 if (coarray && gfc_option.coarray == GFC_FCOARRAY_LIB)
4867 token = gfc_build_addr_expr (NULL_TREE,
4868 gfc_conv_descriptor_token (se->expr));
4869
4870 /* The allocatable variant takes the old pointer as first argument. */
4871 if (allocatable)
4872 gfc_allocate_allocatable (&elseblock, pointer, size, token,
4873 status, errmsg, errlen, expr);
4874 else
4875 gfc_allocate_using_malloc (&elseblock, pointer, size, status);
4876
4877 if (dimension)
4878 {
4879 cond = gfc_unlikely (fold_build2_loc (input_location, NE_EXPR,
4880 boolean_type_node, var_overflow, integer_zero_node));
4881 tmp = fold_build3_loc (input_location, COND_EXPR, void_type_node, cond,
4882 error, gfc_finish_block (&elseblock));
4883 }
4884 else
4885 tmp = gfc_finish_block (&elseblock);
4886
4887 gfc_add_expr_to_block (&se->pre, tmp);
4888
4889 /* Update the array descriptors. */
4890 if (dimension)
4891 gfc_conv_descriptor_offset_set (&set_descriptor_block, se->expr, offset);
4892
4893 set_descriptor = gfc_finish_block (&set_descriptor_block);
4894 if (status != NULL_TREE)
4895 {
4896 cond = fold_build2_loc (input_location, EQ_EXPR,
4897 boolean_type_node, status,
4898 build_int_cst (TREE_TYPE (status), 0));
4899 gfc_add_expr_to_block (&se->pre,
4900 fold_build3_loc (input_location, COND_EXPR, void_type_node,
4901 gfc_likely (cond), set_descriptor,
4902 build_empty_stmt (input_location)));
4903 }
4904 else
4905 gfc_add_expr_to_block (&se->pre, set_descriptor);
4906
4907 if ((expr->ts.type == BT_DERIVED || expr->ts.type == BT_CLASS)
4908 && expr->ts.u.derived->attr.alloc_comp)
4909 {
4910 tmp = gfc_nullify_alloc_comp (expr->ts.u.derived, se->expr,
4911 ref->u.ar.as->rank);
4912 gfc_add_expr_to_block (&se->pre, tmp);
4913 }
4914
4915 return true;
4916 }
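
/* Editorial summary of the path above, using an assumed statement:

       allocate (a(n, m), stat=ierr)

   The bounds and the overflow-checked byte count are evaluated first, the
   data is then allocated, and the descriptor is only updated under the
   status == 0 condition; if the size computation overflowed, ierr is set
   to LIBERROR_ALLOCATION instead of raising a runtime error.  */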
4917
4918
4919 /* Deallocate an array variable. Also used when an allocated variable goes
4920 out of scope. */
4921 /*GCC ARRAYS*/
4922
4923 tree
4924 gfc_array_deallocate (tree descriptor, tree pstat, gfc_expr* expr)
4925 {
4926 tree var;
4927 tree tmp;
4928 stmtblock_t block;
4929
4930 gfc_start_block (&block);
4931 /* Get a pointer to the data. */
4932 var = gfc_conv_descriptor_data_get (descriptor);
4933 STRIP_NOPS (var);
4934
4935 /* Parameter is the address of the data component. */
4936 tmp = gfc_deallocate_with_status (var, pstat, false, expr);
4937 gfc_add_expr_to_block (&block, tmp);
4938
4939 /* Zero the data pointer. */
4940 tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
4941 var, build_int_cst (TREE_TYPE (var), 0));
4942 gfc_add_expr_to_block (&block, tmp);
4943
4944 return gfc_finish_block (&block);
4945 }
4946
4947
4948 /* Create an array constructor from an initialization expression.
4949 We assume the frontend already did any expansions and conversions. */
4950
4951 tree
4952 gfc_conv_array_initializer (tree type, gfc_expr * expr)
4953 {
4954 gfc_constructor *c;
4955 tree tmp;
4956 gfc_se se;
4957 HOST_WIDE_INT hi;
4958 unsigned HOST_WIDE_INT lo;
4959 tree index, range;
4960 VEC(constructor_elt,gc) *v = NULL;
4961
4962 switch (expr->expr_type)
4963 {
4964 case EXPR_CONSTANT:
4965 case EXPR_STRUCTURE:
4966 /* A single scalar or derived type value. Create an array with all
4967 elements equal to that value. */
4968 gfc_init_se (&se, NULL);
4969
4970 if (expr->expr_type == EXPR_CONSTANT)
4971 gfc_conv_constant (&se, expr);
4972 else
4973 gfc_conv_structure (&se, expr, 1);
4974
4975 tmp = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
4976 gcc_assert (tmp && INTEGER_CST_P (tmp));
4977 hi = TREE_INT_CST_HIGH (tmp);
4978 lo = TREE_INT_CST_LOW (tmp);
4979 lo++;
4980 if (lo == 0)
4981 hi++;
4982 /* This will probably eat buckets of memory for large arrays. */
4983 while (hi != 0 || lo != 0)
4984 {
4985 CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, se.expr);
4986 if (lo == 0)
4987 hi--;
4988 lo--;
4989 }
4990 break;
4991
4992 case EXPR_ARRAY:
4993 /* Create a vector of all the elements. */
4994 for (c = gfc_constructor_first (expr->value.constructor);
4995 c; c = gfc_constructor_next (c))
4996 {
4997 if (c->iterator)
4998 {
4999 /* Problems occur when we get something like
5000 integer :: a(lots) = (/(i, i=1, lots)/) */
5001 gfc_fatal_error ("The number of elements in the array constructor "
5002 "at %L requires an increase of the allowed %d "
5003 "upper limit. See -fmax-array-constructor "
5004 "option", &expr->where,
5005 gfc_option.flag_max_array_constructor);
5006 return NULL_TREE;
5007 }
5008 if (mpz_cmp_si (c->offset, 0) != 0)
5009 index = gfc_conv_mpz_to_tree (c->offset, gfc_index_integer_kind);
5010 else
5011 index = NULL_TREE;
5012
5013 if (mpz_cmp_si (c->repeat, 1) > 0)
5014 {
5015 tree tmp1, tmp2;
5016 mpz_t maxval;
5017
5018 mpz_init (maxval);
5019 mpz_add (maxval, c->offset, c->repeat);
5020 mpz_sub_ui (maxval, maxval, 1);
5021 tmp2 = gfc_conv_mpz_to_tree (maxval, gfc_index_integer_kind);
5022 if (mpz_cmp_si (c->offset, 0) != 0)
5023 {
5024 mpz_add_ui (maxval, c->offset, 1);
5025 tmp1 = gfc_conv_mpz_to_tree (maxval, gfc_index_integer_kind);
5026 }
5027 else
5028 tmp1 = gfc_conv_mpz_to_tree (c->offset, gfc_index_integer_kind);
5029
5030 range = fold_build2 (RANGE_EXPR, gfc_array_index_type, tmp1, tmp2);
5031 mpz_clear (maxval);
5032 }
5033 else
5034 range = NULL;
5035
5036 gfc_init_se (&se, NULL);
5037 switch (c->expr->expr_type)
5038 {
5039 case EXPR_CONSTANT:
5040 gfc_conv_constant (&se, c->expr);
5041 break;
5042
5043 case EXPR_STRUCTURE:
5044 gfc_conv_structure (&se, c->expr, 1);
5045 break;
5046
5047 default:
5048 /* Catch those occasional beasts that do not simplify
5049 for one reason or another, assuming that if they are
5050 standard-defying, the frontend will catch them. */
5051 gfc_conv_expr (&se, c->expr);
5052 break;
5053 }
5054
5055 if (range == NULL_TREE)
5056 CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
5057 else
5058 {
5059 if (index != NULL_TREE)
5060 CONSTRUCTOR_APPEND_ELT (v, index, se.expr);
5061 CONSTRUCTOR_APPEND_ELT (v, range, se.expr);
5062 }
5063 }
5064 break;
5065
5066 case EXPR_NULL:
5067 return gfc_build_null_descriptor (type);
5068
5069 default:
5070 gcc_unreachable ();
5071 }
5072
5073 /* Create a constructor from the list of elements. */
5074 tmp = build_constructor (type, v);
5075 TREE_CONSTANT (tmp) = 1;
5076 return tmp;
5077 }
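
/* Illustration of the main branches above (assumed examples):

       integer :: v(100) = 7                ! EXPR_CONSTANT: the single value
                                            ! is replicated over the domain
       integer :: w(6) = (/ 1, 2, 2, 2, 2, 3 /)
                                            ! EXPR_ARRAY: a run of equal values
                                            ! may reach us with c->repeat > 1
                                            ! and is emitted once under a
                                            ! RANGE_EXPR index

   Constructors still containing an iterator at this point are rejected
   with the -fmax-array-constructor error above.  */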
5078
5079
5080 /* Generate code to evaluate non-constant coarray cobounds. */
5081
5082 void
5083 gfc_trans_array_cobounds (tree type, stmtblock_t * pblock,
5084 const gfc_symbol *sym)
5085 {
5086 int dim;
5087 tree ubound;
5088 tree lbound;
5089 gfc_se se;
5090 gfc_array_spec *as;
5091
5092 as = sym->as;
5093
5094 for (dim = as->rank; dim < as->rank + as->corank; dim++)
5095 {
5096 /* Evaluate non-constant array bound expressions. */
5097 lbound = GFC_TYPE_ARRAY_LBOUND (type, dim);
5098 if (as->lower[dim] && !INTEGER_CST_P (lbound))
5099 {
5100 gfc_init_se (&se, NULL);
5101 gfc_conv_expr_type (&se, as->lower[dim], gfc_array_index_type);
5102 gfc_add_block_to_block (pblock, &se.pre);
5103 gfc_add_modify (pblock, lbound, se.expr);
5104 }
5105 ubound = GFC_TYPE_ARRAY_UBOUND (type, dim);
5106 if (as->upper[dim] && !INTEGER_CST_P (ubound))
5107 {
5108 gfc_init_se (&se, NULL);
5109 gfc_conv_expr_type (&se, as->upper[dim], gfc_array_index_type);
5110 gfc_add_block_to_block (pblock, &se.pre);
5111 gfc_add_modify (pblock, ubound, se.expr);
5112 }
5113 }
5114 }
5115
5116
5117 /* Generate code to evaluate non-constant array bounds. Sets *poffset and
5118 returns the size (in elements) of the array. */
5119
5120 static tree
5121 gfc_trans_array_bounds (tree type, gfc_symbol * sym, tree * poffset,
5122 stmtblock_t * pblock)
5123 {
5124 gfc_array_spec *as;
5125 tree size;
5126 tree stride;
5127 tree offset;
5128 tree ubound;
5129 tree lbound;
5130 tree tmp;
5131 gfc_se se;
5132
5133 int dim;
5134
5135 as = sym->as;
5136
5137 size = gfc_index_one_node;
5138 offset = gfc_index_zero_node;
5139 for (dim = 0; dim < as->rank; dim++)
5140 {
5141 /* Evaluate non-constant array bound expressions. */
5142 lbound = GFC_TYPE_ARRAY_LBOUND (type, dim);
5143 if (as->lower[dim] && !INTEGER_CST_P (lbound))
5144 {
5145 gfc_init_se (&se, NULL);
5146 gfc_conv_expr_type (&se, as->lower[dim], gfc_array_index_type);
5147 gfc_add_block_to_block (pblock, &se.pre);
5148 gfc_add_modify (pblock, lbound, se.expr);
5149 }
5150 ubound = GFC_TYPE_ARRAY_UBOUND (type, dim);
5151 if (as->upper[dim] && !INTEGER_CST_P (ubound))
5152 {
5153 gfc_init_se (&se, NULL);
5154 gfc_conv_expr_type (&se, as->upper[dim], gfc_array_index_type);
5155 gfc_add_block_to_block (pblock, &se.pre);
5156 gfc_add_modify (pblock, ubound, se.expr);
5157 }
5158 /* The offset of this dimension. offset = offset - lbound * stride. */
5159 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
5160 lbound, size);
5161 offset = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
5162 offset, tmp);
5163
5164 /* The size of this dimension, and the stride of the next. */
5165 if (dim + 1 < as->rank)
5166 stride = GFC_TYPE_ARRAY_STRIDE (type, dim + 1);
5167 else
5168 stride = GFC_TYPE_ARRAY_SIZE (type);
5169
5170 if (ubound != NULL_TREE && !(stride && INTEGER_CST_P (stride)))
5171 {
5172 /* Calculate stride = size * (ubound + 1 - lbound). */
5173 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5174 gfc_array_index_type,
5175 gfc_index_one_node, lbound);
5176 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5177 gfc_array_index_type, ubound, tmp);
5178 tmp = fold_build2_loc (input_location, MULT_EXPR,
5179 gfc_array_index_type, size, tmp);
5180 if (stride)
5181 gfc_add_modify (pblock, stride, tmp);
5182 else
5183 stride = gfc_evaluate_now (tmp, pblock);
5184
5185 /* Make sure that negative size arrays are translated
5186 to being zero size. */
5187 tmp = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
5188 stride, gfc_index_zero_node);
5189 tmp = fold_build3_loc (input_location, COND_EXPR,
5190 gfc_array_index_type, tmp,
5191 stride, gfc_index_zero_node);
5192 gfc_add_modify (pblock, stride, tmp);
5193 }
5194
5195 size = stride;
5196 }
5197
5198 gfc_trans_array_cobounds (type, pblock, sym);
5199 gfc_trans_vla_type_sizes (sym, pblock);
5200
5201 *poffset = offset;
5202 return size;
5203 }
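
/* Worked numbers for the code above (illustrative only): for an automatic
   array declared as

       real :: a(2:4, 0:9)

   the per-dimension strides are 1 and 3, the returned size is 30 elements
   and the offset is -(2*1 + 0*3) = -2, mirroring the descriptor variant in
   gfc_array_init_size.  */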
5204
5205
5206 /* Generate code to initialize/allocate an array variable. */
5207
5208 void
5209 gfc_trans_auto_array_allocation (tree decl, gfc_symbol * sym,
5210 gfc_wrapped_block * block)
5211 {
5212 stmtblock_t init;
5213 tree type;
5214 tree tmp = NULL_TREE;
5215 tree size;
5216 tree offset;
5217 tree space;
5218 tree inittree;
5219 bool onstack;
5220
5221 gcc_assert (!(sym->attr.pointer || sym->attr.allocatable));
5222
5223 /* Do nothing for USEd variables. */
5224 if (sym->attr.use_assoc)
5225 return;
5226
5227 type = TREE_TYPE (decl);
5228 gcc_assert (GFC_ARRAY_TYPE_P (type));
5229 onstack = TREE_CODE (type) != POINTER_TYPE;
5230
5231 gfc_init_block (&init);
5232
5233 /* Evaluate character string length. */
5234 if (sym->ts.type == BT_CHARACTER
5235 && onstack && !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
5236 {
5237 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5238
5239 gfc_trans_vla_type_sizes (sym, &init);
5240
5241 /* Emit a DECL_EXPR for this variable, which will cause the
5242 gimplifier to allocate storage, and all that good stuff. */
5243 tmp = fold_build1_loc (input_location, DECL_EXPR, TREE_TYPE (decl), decl);
5244 gfc_add_expr_to_block (&init, tmp);
5245 }
5246
5247 if (onstack)
5248 {
5249 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
5250 return;
5251 }
5252
5253 type = TREE_TYPE (type);
5254
5255 gcc_assert (!sym->attr.use_assoc);
5256 gcc_assert (!TREE_STATIC (decl));
5257 gcc_assert (!sym->module);
5258
5259 if (sym->ts.type == BT_CHARACTER
5260 && !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
5261 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5262
5263 size = gfc_trans_array_bounds (type, sym, &offset, &init);
5264
5265 /* Don't actually allocate space for Cray Pointees. */
5266 if (sym->attr.cray_pointee)
5267 {
5268 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5269 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5270
5271 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
5272 return;
5273 }
5274
5275 if (gfc_option.flag_stack_arrays)
5276 {
5277 gcc_assert (TREE_CODE (TREE_TYPE (decl)) == POINTER_TYPE);
5278 space = build_decl (sym->declared_at.lb->location,
5279 VAR_DECL, create_tmp_var_name ("A"),
5280 TREE_TYPE (TREE_TYPE (decl)));
5281 gfc_trans_vla_type_sizes (sym, &init);
5282 }
5283 else
5284 {
5285 /* The size is the number of elements in the array, so multiply by the
5286 size of an element to get the total size. */
5287 tmp = TYPE_SIZE_UNIT (gfc_get_element_type (type));
5288 size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
5289 size, fold_convert (gfc_array_index_type, tmp));
5290
5291 /* Allocate memory to hold the data. */
5292 tmp = gfc_call_malloc (&init, TREE_TYPE (decl), size);
5293 gfc_add_modify (&init, decl, tmp);
5294
5295 /* Free the temporary. */
5296 tmp = gfc_call_free (convert (pvoid_type_node, decl));
5297 space = NULL_TREE;
5298 }
5299
5300 /* Set offset of the array. */
5301 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5302 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5303
5304 /* Automatic arrays should not have initializers. */
5305 gcc_assert (!sym->value);
5306
5307 inittree = gfc_finish_block (&init);
5308
5309 if (space)
5310 {
5311 tree addr;
5312 pushdecl (space);
5313
5314 /* Don't create a new scope; emit the DECL_EXPR in exactly the scope
5315 where space is also located. */
5316 gfc_init_block (&init);
5317 tmp = fold_build1_loc (input_location, DECL_EXPR,
5318 TREE_TYPE (space), space);
5319 gfc_add_expr_to_block (&init, tmp);
5320 addr = fold_build1_loc (sym->declared_at.lb->location,
5321 ADDR_EXPR, TREE_TYPE (decl), space);
5322 gfc_add_modify (&init, decl, addr);
5323 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
5324 tmp = NULL_TREE;
5325 }
5326 gfc_add_init_cleanup (block, inittree, tmp);
5327 }
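
/* Assumed example of an array handled by the function above:

       subroutine work (n)
         integer :: n
         real :: scratch(2*n)    ! automatic array, bounds evaluated on entry
       end subroutine work

   scratch either lives on the stack (constant shape, or -fstack-arrays for
   the variable-size case) or is malloc'd in the init block and freed by the
   cleanup code added to BLOCK.  */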
5328
5329
5330 /* Generate entry and exit code for g77 calling convention arrays. */
5331
5332 void
5333 gfc_trans_g77_array (gfc_symbol * sym, gfc_wrapped_block * block)
5334 {
5335 tree parm;
5336 tree type;
5337 locus loc;
5338 tree offset;
5339 tree tmp;
5340 tree stmt;
5341 stmtblock_t init;
5342
5343 gfc_save_backend_locus (&loc);
5344 gfc_set_backend_locus (&sym->declared_at);
5345
5346 /* Descriptor type. */
5347 parm = sym->backend_decl;
5348 type = TREE_TYPE (parm);
5349 gcc_assert (GFC_ARRAY_TYPE_P (type));
5350
5351 gfc_start_block (&init);
5352
5353 if (sym->ts.type == BT_CHARACTER
5354 && TREE_CODE (sym->ts.u.cl->backend_decl) == VAR_DECL)
5355 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5356
5357 /* Evaluate the bounds of the array. */
5358 gfc_trans_array_bounds (type, sym, &offset, &init);
5359
5360 /* Set the offset. */
5361 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5362 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5363
5364 /* Set the pointer itself if we aren't using the parameter directly. */
5365 if (TREE_CODE (parm) != PARM_DECL)
5366 {
5367 tmp = convert (TREE_TYPE (parm), GFC_DECL_SAVED_DESCRIPTOR (parm));
5368 gfc_add_modify (&init, parm, tmp);
5369 }
5370 stmt = gfc_finish_block (&init);
5371
5372 gfc_restore_backend_locus (&loc);
5373
5374 /* Add the initialization code to the start of the function. */
5375
5376 if (sym->attr.optional || sym->attr.not_always_present)
5377 {
5378 tmp = gfc_conv_expr_present (sym);
5379 stmt = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt (input_location));
5380 }
5381
5382 gfc_add_init_cleanup (block, stmt, NULL_TREE);
5383 }
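
/* Illustrative dummy that takes the g77 path above: an explicit-shape array
   such as

       subroutine s (a, n)
         integer :: n
         real :: a(n, n)
       end subroutine s

   receives only a data pointer (no descriptor); the bounds and the offset
   are recomputed from n in the entry code generated here.  */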
5384
5385
5386 /* Modify the descriptor of an array parameter so that it has the
5387 correct lower bound. Also move the upper bound accordingly.
5388 If the array is not packed, it will be copied into a temporary.
5389 For each dimension we set the new lower and upper bounds. Then we copy the
5390 stride and calculate the offset for this dimension. We also work out
5391 what the stride of a packed array would be, and see if the two match.
5392 If the array needs repacking, we set the stride to the values we just
5393 calculated, recalculate the offset and copy the array data.
5394 Code is also added to copy the data back at the end of the function.
5395 */
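
/* Assumed scenario for the repacking described above: with repacking
   enabled, an assumed-shape dummy such as

       subroutine t (b)
         real :: b(:)
       end subroutine t

   called as t (a(1:19:2)) may receive a descriptor for non-contiguous
   storage; the entry code built here then obtains packed data through
   gfor_fndecl_in_pack and, unless the dummy is INTENT(IN), copies it
   back through gfor_fndecl_in_unpack in the cleanup block.  */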
5396
5397 void
5398 gfc_trans_dummy_array_bias (gfc_symbol * sym, tree tmpdesc,
5399 gfc_wrapped_block * block)
5400 {
5401 tree size;
5402 tree type;
5403 tree offset;
5404 locus loc;
5405 stmtblock_t init;
5406 tree stmtInit, stmtCleanup;
5407 tree lbound;
5408 tree ubound;
5409 tree dubound;
5410 tree dlbound;
5411 tree dumdesc;
5412 tree tmp;
5413 tree stride, stride2;
5414 tree stmt_packed;
5415 tree stmt_unpacked;
5416 tree partial;
5417 gfc_se se;
5418 int n;
5419 int checkparm;
5420 int no_repack;
5421 bool optional_arg;
5422
5423 /* Do nothing for pointer and allocatable arrays. */
5424 if (sym->attr.pointer || sym->attr.allocatable)
5425 return;
5426
5427 if (sym->attr.dummy && gfc_is_nodesc_array (sym))
5428 {
5429 gfc_trans_g77_array (sym, block);
5430 return;
5431 }
5432
5433 gfc_save_backend_locus (&loc);
5434 gfc_set_backend_locus (&sym->declared_at);
5435
5436 /* Descriptor type. */
5437 type = TREE_TYPE (tmpdesc);
5438 gcc_assert (GFC_ARRAY_TYPE_P (type));
5439 dumdesc = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
5440 dumdesc = build_fold_indirect_ref_loc (input_location, dumdesc);
5441 gfc_start_block (&init);
5442
5443 if (sym->ts.type == BT_CHARACTER
5444 && TREE_CODE (sym->ts.u.cl->backend_decl) == VAR_DECL)
5445 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
5446
5447 checkparm = (sym->as->type == AS_EXPLICIT
5448 && (gfc_option.rtcheck & GFC_RTCHECK_BOUNDS));
5449
5450 no_repack = !(GFC_DECL_PACKED_ARRAY (tmpdesc)
5451 || GFC_DECL_PARTIAL_PACKED_ARRAY (tmpdesc));
5452
5453 if (GFC_DECL_PARTIAL_PACKED_ARRAY (tmpdesc))
5454 {
5455 /* For non-constant shape arrays we only check if the first dimension
5456 is contiguous. Repacking higher dimensions wouldn't gain us
5457 anything as we still don't know the array stride. */
5458 partial = gfc_create_var (boolean_type_node, "partial");
5459 TREE_USED (partial) = 1;
5460 tmp = gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[0]);
5461 tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node, tmp,
5462 gfc_index_one_node);
5463 gfc_add_modify (&init, partial, tmp);
5464 }
5465 else
5466 partial = NULL_TREE;
5467
5468 /* The naming of stmt_unpacked and stmt_packed may be counter-intuitive
5469 here; however, I think it does the right thing. */
5470 if (no_repack)
5471 {
5472 /* Set the first stride. */
5473 stride = gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[0]);
5474 stride = gfc_evaluate_now (stride, &init);
5475
5476 tmp = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
5477 stride, gfc_index_zero_node);
5478 tmp = fold_build3_loc (input_location, COND_EXPR, gfc_array_index_type,
5479 tmp, gfc_index_one_node, stride);
5480 stride = GFC_TYPE_ARRAY_STRIDE (type, 0);
5481 gfc_add_modify (&init, stride, tmp);
5482
5483 /* Allow the user to disable array repacking. */
5484 stmt_unpacked = NULL_TREE;
5485 }
5486 else
5487 {
5488 gcc_assert (integer_onep (GFC_TYPE_ARRAY_STRIDE (type, 0)));
5489 /* A library call to repack the array if necessary. */
5490 tmp = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
5491 stmt_unpacked = build_call_expr_loc (input_location,
5492 gfor_fndecl_in_pack, 1, tmp);
5493
5494 stride = gfc_index_one_node;
5495
5496 if (gfc_option.warn_array_temp)
5497 gfc_warning ("Creating array temporary at %L", &loc);
5498 }
5499
5500 /* This is for the case where the array data is used directly without
5501 calling the repack function. */
5502 if (no_repack || partial != NULL_TREE)
5503 stmt_packed = gfc_conv_descriptor_data_get (dumdesc);
5504 else
5505 stmt_packed = NULL_TREE;
5506
5507 /* Assign the data pointer. */
5508 if (stmt_packed != NULL_TREE && stmt_unpacked != NULL_TREE)
5509 {
5510 /* Don't repack unknown shape arrays when the first stride is 1. */
5511 tmp = fold_build3_loc (input_location, COND_EXPR, TREE_TYPE (stmt_packed),
5512 partial, stmt_packed, stmt_unpacked);
5513 }
5514 else
5515 tmp = stmt_packed != NULL_TREE ? stmt_packed : stmt_unpacked;
5516 gfc_add_modify (&init, tmpdesc, fold_convert (type, tmp));
5517
5518 offset = gfc_index_zero_node;
5519 size = gfc_index_one_node;
5520
5521 /* Evaluate the bounds of the array. */
5522 for (n = 0; n < sym->as->rank; n++)
5523 {
5524 if (checkparm || !sym->as->upper[n])
5525 {
5526 /* Get the bounds of the actual parameter. */
5527 dubound = gfc_conv_descriptor_ubound_get (dumdesc, gfc_rank_cst[n]);
5528 dlbound = gfc_conv_descriptor_lbound_get (dumdesc, gfc_rank_cst[n]);
5529 }
5530 else
5531 {
5532 dubound = NULL_TREE;
5533 dlbound = NULL_TREE;
5534 }
5535
5536 lbound = GFC_TYPE_ARRAY_LBOUND (type, n);
5537 if (!INTEGER_CST_P (lbound))
5538 {
5539 gfc_init_se (&se, NULL);
5540 gfc_conv_expr_type (&se, sym->as->lower[n],
5541 gfc_array_index_type);
5542 gfc_add_block_to_block (&init, &se.pre);
5543 gfc_add_modify (&init, lbound, se.expr);
5544 }
5545
5546 ubound = GFC_TYPE_ARRAY_UBOUND (type, n);
5547 /* Set the desired upper bound. */
5548 if (sym->as->upper[n])
5549 {
5550 /* We know what we want the upper bound to be. */
5551 if (!INTEGER_CST_P (ubound))
5552 {
5553 gfc_init_se (&se, NULL);
5554 gfc_conv_expr_type (&se, sym->as->upper[n],
5555 gfc_array_index_type);
5556 gfc_add_block_to_block (&init, &se.pre);
5557 gfc_add_modify (&init, ubound, se.expr);
5558 }
5559
5560 /* Check the sizes match. */
5561 if (checkparm)
5562 {
5563 /* Check (ubound(a) - lbound(a) == ubound(b) - lbound(b)). */
5564 char * msg;
5565 tree temp;
5566
5567 temp = fold_build2_loc (input_location, MINUS_EXPR,
5568 gfc_array_index_type, ubound, lbound);
5569 temp = fold_build2_loc (input_location, PLUS_EXPR,
5570 gfc_array_index_type,
5571 gfc_index_one_node, temp);
5572 stride2 = fold_build2_loc (input_location, MINUS_EXPR,
5573 gfc_array_index_type, dubound,
5574 dlbound);
5575 stride2 = fold_build2_loc (input_location, PLUS_EXPR,
5576 gfc_array_index_type,
5577 gfc_index_one_node, stride2);
5578 tmp = fold_build2_loc (input_location, NE_EXPR,
5579 gfc_array_index_type, temp, stride2);
5580 asprintf (&msg, "Dimension %d of array '%s' has extent "
5581 "%%ld instead of %%ld", n+1, sym->name);
5582
5583 gfc_trans_runtime_check (true, false, tmp, &init, &loc, msg,
5584 fold_convert (long_integer_type_node, temp),
5585 fold_convert (long_integer_type_node, stride2));
5586
5587 free (msg);
5588 }
5589 }
5590 else
5591 {
5592 /* For assumed-shape arrays, move the upper bound by the same amount
5593 as the lower bound. */
5594 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5595 gfc_array_index_type, dubound, dlbound);
5596 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5597 gfc_array_index_type, tmp, lbound);
5598 gfc_add_modify (&init, ubound, tmp);
5599 }
5600 /* The offset of this dimension. offset = offset - lbound * stride. */
5601 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
5602 lbound, stride);
5603 offset = fold_build2_loc (input_location, MINUS_EXPR,
5604 gfc_array_index_type, offset, tmp);
5605
5606 /* The size of this dimension, and the stride of the next. */
5607 if (n + 1 < sym->as->rank)
5608 {
5609 stride = GFC_TYPE_ARRAY_STRIDE (type, n + 1);
5610
5611 if (no_repack || partial != NULL_TREE)
5612 stmt_unpacked =
5613 gfc_conv_descriptor_stride_get (dumdesc, gfc_rank_cst[n+1]);
5614
5615 /* Figure out the stride if not a known constant. */
5616 if (!INTEGER_CST_P (stride))
5617 {
5618 if (no_repack)
5619 stmt_packed = NULL_TREE;
5620 else
5621 {
5622 /* Calculate stride = size * (ubound + 1 - lbound). */
5623 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5624 gfc_array_index_type,
5625 gfc_index_one_node, lbound);
5626 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5627 gfc_array_index_type, ubound, tmp);
5628 size = fold_build2_loc (input_location, MULT_EXPR,
5629 gfc_array_index_type, size, tmp);
5630 stmt_packed = size;
5631 }
5632
5633 /* Assign the stride. */
5634 if (stmt_packed != NULL_TREE && stmt_unpacked != NULL_TREE)
5635 tmp = fold_build3_loc (input_location, COND_EXPR,
5636 gfc_array_index_type, partial,
5637 stmt_unpacked, stmt_packed);
5638 else
5639 tmp = (stmt_packed != NULL_TREE) ? stmt_packed : stmt_unpacked;
5640 gfc_add_modify (&init, stride, tmp);
5641 }
5642 }
5643 else
5644 {
5645 stride = GFC_TYPE_ARRAY_SIZE (type);
5646
5647 if (stride && !INTEGER_CST_P (stride))
5648 {
5649 /* Calculate size = stride * (ubound + 1 - lbound). */
5650 tmp = fold_build2_loc (input_location, MINUS_EXPR,
5651 gfc_array_index_type,
5652 gfc_index_one_node, lbound);
5653 tmp = fold_build2_loc (input_location, PLUS_EXPR,
5654 gfc_array_index_type,
5655 ubound, tmp);
5656 tmp = fold_build2_loc (input_location, MULT_EXPR,
5657 gfc_array_index_type,
5658 GFC_TYPE_ARRAY_STRIDE (type, n), tmp);
5659 gfc_add_modify (&init, stride, tmp);
5660 }
5661 }
5662 }
5663
5664 gfc_trans_array_cobounds (type, &init, sym);
5665
5666 /* Set the offset. */
5667 if (TREE_CODE (GFC_TYPE_ARRAY_OFFSET (type)) == VAR_DECL)
5668 gfc_add_modify (&init, GFC_TYPE_ARRAY_OFFSET (type), offset);
5669
5670 gfc_trans_vla_type_sizes (sym, &init);
5671
5672 stmtInit = gfc_finish_block (&init);
5673
5674 /* Only do the entry/initialization code if the arg is present. */
5675 dumdesc = GFC_DECL_SAVED_DESCRIPTOR (tmpdesc);
5676 optional_arg = (sym->attr.optional
5677 || (sym->ns->proc_name->attr.entry_master
5678 && sym->attr.dummy));
5679 if (optional_arg)
5680 {
5681 tmp = gfc_conv_expr_present (sym);
5682 stmtInit = build3_v (COND_EXPR, tmp, stmtInit,
5683 build_empty_stmt (input_location));
5684 }
5685
5686 /* Cleanup code. */
5687 if (no_repack)
5688 stmtCleanup = NULL_TREE;
5689 else
5690 {
5691 stmtblock_t cleanup;
5692 gfc_start_block (&cleanup);
5693
5694 if (sym->attr.intent != INTENT_IN)
5695 {
5696 /* Copy the data back. */
5697 tmp = build_call_expr_loc (input_location,
5698 gfor_fndecl_in_unpack, 2, dumdesc, tmpdesc);
5699 gfc_add_expr_to_block (&cleanup, tmp);
5700 }
5701
5702 /* Free the temporary. */
5703 tmp = gfc_call_free (tmpdesc);
5704 gfc_add_expr_to_block (&cleanup, tmp);
5705
5706 stmtCleanup = gfc_finish_block (&cleanup);
5707
5708 /* Only do the cleanup if the array was repacked. */
5709 tmp = build_fold_indirect_ref_loc (input_location, dumdesc);
5710 tmp = gfc_conv_descriptor_data_get (tmp);
5711 tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
5712 tmp, tmpdesc);
5713 stmtCleanup = build3_v (COND_EXPR, tmp, stmtCleanup,
5714 build_empty_stmt (input_location));
5715
5716 if (optional_arg)
5717 {
5718 tmp = gfc_conv_expr_present (sym);
5719 stmtCleanup = build3_v (COND_EXPR, tmp, stmtCleanup,
5720 build_empty_stmt (input_location));
5721 }
5722 }
5723
5724 /* We don't need to free any memory allocated by internal_pack as it will
5725 be freed at the end of the function by pop_context. */
5726 gfc_add_init_cleanup (block, stmtInit, stmtCleanup);
5727
5728 gfc_restore_backend_locus (&loc);
5729 }
5730
5731
5732 /* Calculate the overall offset, including subreferences. */
5733 static void
5734 gfc_get_dataptr_offset (stmtblock_t *block, tree parm, tree desc, tree offset,
5735 bool subref, gfc_expr *expr)
5736 {
5737 tree tmp;
5738 tree field;
5739 tree stride;
5740 tree index;
5741 gfc_ref *ref;
5742 gfc_se start;
5743 int n;
5744
5745 /* If offset is NULL and this is not a subreferenced array, there is
5746 nothing to do. */
5747 if (offset == NULL_TREE)
5748 {
5749 if (subref)
5750 offset = gfc_index_zero_node;
5751 else
5752 return;
5753 }
5754
5755 tmp = gfc_conv_array_data (desc);
5756 tmp = build_fold_indirect_ref_loc (input_location,
5757 tmp);
5758 tmp = gfc_build_array_ref (tmp, offset, NULL);
5759
5760 /* Offset the data pointer for pointer assignments from arrays with
5761 subreferences; e.g. my_integer => my_type(:)%integer_component. */
5762 if (subref)
5763 {
5764 /* Go past the array reference. */
5765 for (ref = expr->ref; ref; ref = ref->next)
5766 if (ref->type == REF_ARRAY &&
5767 ref->u.ar.type != AR_ELEMENT)
5768 {
5769 ref = ref->next;
5770 break;
5771 }
5772
5773 /* Calculate the offset for each subsequent subreference. */
5774 for (; ref; ref = ref->next)
5775 {
5776 switch (ref->type)
5777 {
5778 case REF_COMPONENT:
5779 field = ref->u.c.component->backend_decl;
5780 gcc_assert (field && TREE_CODE (field) == FIELD_DECL);
5781 tmp = fold_build3_loc (input_location, COMPONENT_REF,
5782 TREE_TYPE (field),
5783 tmp, field, NULL_TREE);
5784 break;
5785
5786 case REF_SUBSTRING:
5787 gcc_assert (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE);
5788 gfc_init_se (&start, NULL);
5789 gfc_conv_expr_type (&start, ref->u.ss.start, gfc_charlen_type_node);
5790 gfc_add_block_to_block (block, &start.pre);
5791 tmp = gfc_build_array_ref (tmp, start.expr, NULL);
5792 break;
5793
5794 case REF_ARRAY:
5795 gcc_assert (TREE_CODE (TREE_TYPE (tmp)) == ARRAY_TYPE
5796 && ref->u.ar.type == AR_ELEMENT);
5797
5798 /* TODO - Add bounds checking. */
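/* The loop below computes the zero-based element offset as
sum_n (start_n - lbound_n) * stride_n, where stride_n is the
product of the extents of the dimensions preceding n. */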
5799 stride = gfc_index_one_node;
5800 index = gfc_index_zero_node;
5801 for (n = 0; n < ref->u.ar.dimen; n++)
5802 {
5803 tree itmp;
5804 tree jtmp;
5805
5806 /* Update the index. */
5807 gfc_init_se (&start, NULL);
5808 gfc_conv_expr_type (&start, ref->u.ar.start[n], gfc_array_index_type);
5809 itmp = gfc_evaluate_now (start.expr, block);
5810 gfc_init_se (&start, NULL);
5811 gfc_conv_expr_type (&start, ref->u.ar.as->lower[n], gfc_array_index_type);
5812 jtmp = gfc_evaluate_now (start.expr, block);
5813 itmp = fold_build2_loc (input_location, MINUS_EXPR,
5814 gfc_array_index_type, itmp, jtmp);
5815 itmp = fold_build2_loc (input_location, MULT_EXPR,
5816 gfc_array_index_type, itmp, stride);
5817 index = fold_build2_loc (input_location, PLUS_EXPR,
5818 gfc_array_index_type, itmp, index);
5819 index = gfc_evaluate_now (index, block);
5820
5821 /* Update the stride. */
5822 gfc_init_se (&start, NULL);
5823 gfc_conv_expr_type (&start, ref->u.ar.as->upper[n], gfc_array_index_type);
5824 itmp = fold_build2_loc (input_location, MINUS_EXPR,
5825 gfc_array_index_type, start.expr,
5826 jtmp);
5827 itmp = fold_build2_loc (input_location, PLUS_EXPR,
5828 gfc_array_index_type,
5829 gfc_index_one_node, itmp);
5830 stride = fold_build2_loc (input_location, MULT_EXPR,
5831 gfc_array_index_type, stride, itmp);
5832 stride = gfc_evaluate_now (stride, block);
5833 }
5834
5835 /* Apply the index to obtain the array element. */
5836 tmp = gfc_build_array_ref (tmp, index, NULL);
5837 break;
5838
5839 default:
5840 gcc_unreachable ();
5841 break;
5842 }
5843 }
5844 }
5845
5846 /* Set the target data pointer. */
5847 offset = gfc_build_addr_expr (gfc_array_dataptr_type (desc), tmp);
5848 gfc_conv_descriptor_data_set (block, parm, offset);
5849 }
5850
5851
5852 /* gfc_conv_expr_descriptor needs the string length of an expression
5853 so that the size of the temporary can be obtained. This is done
5854 by adding up the string lengths of all the elements in the
5855 expression. Functions with a non-constant length expression have their
5856 string lengths mapped onto the actual arguments using the
5857 interface mapping machinery in trans-expr.c. */
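/* E.g. for an expression such as str1 // str2 the lengths of the two
operands are added, while for a call to a character function whose
length depends on its dummy arguments the length expression is
evaluated via gfc_apply_interface_mapping using the actual arguments. */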
5858 static void
5859 get_array_charlen (gfc_expr *expr, gfc_se *se)
5860 {
5861 gfc_interface_mapping mapping;
5862 gfc_formal_arglist *formal;
5863 gfc_actual_arglist *arg;
5864 gfc_se tse;
5865
5866 if (expr->ts.u.cl->length
5867 && gfc_is_constant_expr (expr->ts.u.cl->length))
5868 {
5869 if (!expr->ts.u.cl->backend_decl)
5870 gfc_conv_string_length (expr->ts.u.cl, expr, &se->pre);
5871 return;
5872 }
5873
5874 switch (expr->expr_type)
5875 {
5876 case EXPR_OP:
5877 get_array_charlen (expr->value.op.op1, se);
5878
5879 /* For parentheses the expression ts.u.cl is identical. */
5880 if (expr->value.op.op == INTRINSIC_PARENTHESES)
5881 return;
5882
5883 expr->ts.u.cl->backend_decl =
5884 gfc_create_var (gfc_charlen_type_node, "sln");
5885
5886 if (expr->value.op.op2)
5887 {
5888 get_array_charlen (expr->value.op.op2, se);
5889
5890 gcc_assert (expr->value.op.op == INTRINSIC_CONCAT);
5891
5892 /* Add the string lengths and assign them to the expression
5893 string length backend declaration. */
5894 gfc_add_modify (&se->pre, expr->ts.u.cl->backend_decl,
5895 fold_build2_loc (input_location, PLUS_EXPR,
5896 gfc_charlen_type_node,
5897 expr->value.op.op1->ts.u.cl->backend_decl,
5898 expr->value.op.op2->ts.u.cl->backend_decl));
5899 }
5900 else
5901 gfc_add_modify (&se->pre, expr->ts.u.cl->backend_decl,
5902 expr->value.op.op1->ts.u.cl->backend_decl);
5903 break;
5904
5905 case EXPR_FUNCTION:
5906 if (expr->value.function.esym == NULL
5907 || expr->ts.u.cl->length->expr_type == EXPR_CONSTANT)
5908 {
5909 gfc_conv_string_length (expr->ts.u.cl, expr, &se->pre);
5910 break;
5911 }
5912
5913 /* Map expressions involving the dummy arguments onto the actual
5914 argument expressions. */
5915 gfc_init_interface_mapping (&mapping);
5916 formal = expr->symtree->n.sym->formal;
5917 arg = expr->value.function.actual;
5918
5919 /* Set se = NULL in the calls to the interface mapping, to suppress any
5920 backend stuff. */
5921 for (; arg != NULL; arg = arg->next, formal = formal ? formal->next : NULL)
5922 {
5923 if (!arg->expr)
5924 continue;
5925 if (formal->sym)
5926 gfc_add_interface_mapping (&mapping, formal->sym, NULL, arg->expr);
5927 }
5928
5929 gfc_init_se (&tse, NULL);
5930
5931 /* Build the expression for the character length and convert it. */
5932 gfc_apply_interface_mapping (&mapping, &tse, expr->ts.u.cl->length);
5933
5934 gfc_add_block_to_block (&se->pre, &tse.pre);
5935 gfc_add_block_to_block (&se->post, &tse.post);
5936 tse.expr = fold_convert (gfc_charlen_type_node, tse.expr);
5937 tse.expr = fold_build2_loc (input_location, MAX_EXPR,
5938 gfc_charlen_type_node, tse.expr,
5939 build_int_cst (gfc_charlen_type_node, 0));
5940 expr->ts.u.cl->backend_decl = tse.expr;
5941 gfc_free_interface_mapping (&mapping);
5942 break;
5943
5944 default:
5945 gfc_conv_string_length (expr->ts.u.cl, expr, &se->pre);
5946 break;
5947 }
5948 }
5949
5950
5951 /* Helper function to check whether the scalarizer dimensions are transposed. */
5952 static bool
5953 transposed_dims (gfc_ss *ss)
5954 {
5955 int n;
5956
5957 for (n = 0; n < ss->dimen; n++)
5958 if (ss->dim[n] != n)
5959 return true;
5960 return false;
5961 }
5962
5963 /* Convert an array for passing as an actual argument. Expressions and
5964 vector subscripts are evaluated and stored in a temporary, which is then
5965 passed. For whole arrays the descriptor is passed. For array sections
5966 a modified copy of the descriptor is passed, but using the original data.
5967
5968 This function is also used for array pointer assignments, and there
5969 are three cases:
5970
5971 - se->want_pointer && !se->direct_byref
5972 EXPR is an actual argument. On exit, se->expr contains a
5973 pointer to the array descriptor.
5974
5975 - !se->want_pointer && !se->direct_byref
5976 EXPR is an actual argument to an intrinsic function or the
5977 left-hand side of a pointer assignment. On exit, se->expr
5978 contains the descriptor for EXPR.
5979
5980 - !se->want_pointer && se->direct_byref
5981 EXPR is the right-hand side of a pointer assignment and
5982 se->expr is the descriptor for the previously-evaluated
5983 left-hand side. The function creates an assignment from
5984 EXPR to se->expr.
5985
5986
5987 The se->force_tmp flag disables the non-copying descriptor optimization
5988 that is used for transpose. It may be used in cases where there is an
5989 alias between the transpose argument and another argument in the same
5990 function call. */
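/* E.g. for a pointer assignment p => a(1:n:2) the descriptor of P is
evaluated first into se->expr; this function is then called with
se->direct_byref set and EXPR = A(1:N:2), and it builds a descriptor
for P that reuses A's data without copying. */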
5991
5992 void
5993 gfc_conv_expr_descriptor (gfc_se * se, gfc_expr * expr, gfc_ss * ss)
5994 {
5995 gfc_ss_type ss_type;
5996 gfc_ss_info *ss_info;
5997 gfc_loopinfo loop;
5998 gfc_array_info *info;
5999 int need_tmp;
6000 int n;
6001 tree tmp;
6002 tree desc;
6003 stmtblock_t block;
6004 tree start;
6005 tree offset;
6006 int full;
6007 bool subref_array_target = false;
6008 gfc_expr *arg, *ss_expr;
6009
6010 gcc_assert (ss != NULL);
6011 gcc_assert (ss != gfc_ss_terminator);
6012
6013 ss_info = ss->info;
6014 ss_type = ss_info->type;
6015 ss_expr = ss_info->expr;
6016
6017 /* Special case things we know we can pass easily. */
6018 switch (expr->expr_type)
6019 {
6020 case EXPR_VARIABLE:
6021 /* If we have a linear array section, we can pass it directly.
6022 Otherwise we need to copy it into a temporary. */
6023
6024 gcc_assert (ss_type == GFC_SS_SECTION);
6025 gcc_assert (ss_expr == expr);
6026 info = &ss_info->data.array;
6027
6028 /* Get the descriptor for the array. */
6029 gfc_conv_ss_descriptor (&se->pre, ss, 0);
6030 desc = info->descriptor;
6031
6032 subref_array_target = se->direct_byref && is_subref_array (expr);
6033 need_tmp = gfc_ref_needs_temporary_p (expr->ref)
6034 && !subref_array_target;
6035
6036 if (se->force_tmp)
6037 need_tmp = 1;
6038
6039 if (need_tmp)
6040 full = 0;
6041 else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6042 {
6043 /* Create a new descriptor if the array doesn't have one. */
6044 full = 0;
6045 }
6046 else if (info->ref->u.ar.type == AR_FULL)
6047 full = 1;
6048 else if (se->direct_byref)
6049 full = 0;
6050 else
6051 full = gfc_full_array_ref_p (info->ref, NULL);
6052
6053 if (full && !transposed_dims (ss))
6054 {
6055 if (se->direct_byref && !se->byref_noassign)
6056 {
6057 /* Copy the descriptor for pointer assignments. */
6058 gfc_add_modify (&se->pre, se->expr, desc);
6059
6060 /* Add any offsets from subreferences. */
6061 gfc_get_dataptr_offset (&se->pre, se->expr, desc, NULL_TREE,
6062 subref_array_target, expr);
6063 }
6064 else if (se->want_pointer)
6065 {
6066 /* We pass full arrays directly. This means that pointers and
6067 allocatable arrays should also work. */
6068 se->expr = gfc_build_addr_expr (NULL_TREE, desc);
6069 }
6070 else
6071 {
6072 se->expr = desc;
6073 }
6074
6075 if (expr->ts.type == BT_CHARACTER)
6076 se->string_length = gfc_get_expr_charlen (expr);
6077
6078 return;
6079 }
6080 break;
6081
6082 case EXPR_FUNCTION:
6083
6084 /* We don't need to copy data in some cases. */
6085 arg = gfc_get_noncopying_intrinsic_argument (expr);
6086 if (arg)
6087 {
6088 /* This is a call to transpose... */
6089 gcc_assert (expr->value.function.isym->id == GFC_ISYM_TRANSPOSE);
6090 /* ... which has already been handled by the scalarizer, so
6091 that we just need to get its argument's descriptor. */
6092 gfc_conv_expr_descriptor (se, expr->value.function.actual->expr, ss);
6093 return;
6094 }
6095
6096 /* A transformational function return value will be a temporary
6097 array descriptor. We still need to go through the scalarizer
6098 to create the descriptor. Elemental functions are handled as
6099 arbitrary expressions, i.e. copy to a temporary. */
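/* E.g. MATMUL (a, b) is transformational and its result already comes
with a temporary descriptor, whereas ABS (a) is elemental and is
evaluated element by element into a temporary. */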
6100
6101 if (se->direct_byref)
6102 {
6103 gcc_assert (ss_type == GFC_SS_FUNCTION && ss_expr == expr);
6104
6105 /* For pointer assignments pass the descriptor directly. */
6106 if (se->ss == NULL)
6107 se->ss = ss;
6108 else
6109 gcc_assert (se->ss == ss);
6110 se->expr = gfc_build_addr_expr (NULL_TREE, se->expr);
6111 gfc_conv_expr (se, expr);
6112 return;
6113 }
6114
6115 if (ss_expr != expr || ss_type != GFC_SS_FUNCTION)
6116 {
6117 if (ss_expr != expr)
6118 /* Elemental function. */
6119 gcc_assert ((expr->value.function.esym != NULL
6120 && expr->value.function.esym->attr.elemental)
6121 || (expr->value.function.isym != NULL
6122 && expr->value.function.isym->elemental));
6123 else
6124 gcc_assert (ss_type == GFC_SS_INTRINSIC);
6125
6126 need_tmp = 1;
6127 if (expr->ts.type == BT_CHARACTER
6128 && expr->ts.u.cl->length->expr_type != EXPR_CONSTANT)
6129 get_array_charlen (expr, se);
6130
6131 info = NULL;
6132 }
6133 else
6134 {
6135 /* Transformational function. */
6136 info = &ss_info->data.array;
6137 need_tmp = 0;
6138 }
6139 break;
6140
6141 case EXPR_ARRAY:
6142 /* Constant array constructors don't need a temporary. */
6143 if (ss_type == GFC_SS_CONSTRUCTOR
6144 && expr->ts.type != BT_CHARACTER
6145 && gfc_constant_array_constructor_p (expr->value.constructor))
6146 {
6147 need_tmp = 0;
6148 info = &ss_info->data.array;
6149 }
6150 else
6151 {
6152 need_tmp = 1;
6153 info = NULL;
6154 }
6155 break;
6156
6157 default:
6158 /* Something complicated. Copy it into a temporary. */
6159 need_tmp = 1;
6160 info = NULL;
6161 break;
6162 }
6163
6164 /* If we are creating a temporary, we don't need to bother about aliases
6165 anymore. */
6166 if (need_tmp)
6167 se->force_tmp = 0;
6168
6169 gfc_init_loopinfo (&loop);
6170
6171 /* Associate the SS with the loop. */
6172 gfc_add_ss_to_loop (&loop, ss);
6173
6174 /* Tell the scalarizer not to bother creating loop variables, etc. */
6175 if (!need_tmp)
6176 loop.array_parameter = 1;
6177 else
6178 /* The right-hand side of a pointer assignment mustn't use a temporary. */
6179 gcc_assert (!se->direct_byref);
6180
6181 /* Setup the scalarizing loops and bounds. */
6182 gfc_conv_ss_startstride (&loop);
6183
6184 if (need_tmp)
6185 {
6186 if (expr->ts.type == BT_CHARACTER && !expr->ts.u.cl->backend_decl)
6187 get_array_charlen (expr, se);
6188
6189 /* Tell the scalarizer to make a temporary. */
6190 loop.temp_ss = gfc_get_temp_ss (gfc_typenode_for_spec (&expr->ts),
6191 ((expr->ts.type == BT_CHARACTER)
6192 ? expr->ts.u.cl->backend_decl
6193 : NULL),
6194 loop.dimen);
6195
6196 se->string_length = loop.temp_ss->info->string_length;
6197 gcc_assert (loop.temp_ss->dimen == loop.dimen);
6198 gfc_add_ss_to_loop (&loop, loop.temp_ss);
6199 }
6200
6201 gfc_conv_loop_setup (&loop, & expr->where);
6202
6203 if (need_tmp)
6204 {
6205 /* Copy into a temporary and pass that. We don't need to copy the data
6206 back because expressions and vector subscripts must be INTENT_IN. */
6207 /* TODO: Optimize passing function return values. */
6208 gfc_se lse;
6209 gfc_se rse;
6210
6211 /* Start the copying loops. */
6212 gfc_mark_ss_chain_used (loop.temp_ss, 1);
6213 gfc_mark_ss_chain_used (ss, 1);
6214 gfc_start_scalarized_body (&loop, &block);
6215
6216 /* Copy each data element. */
6217 gfc_init_se (&lse, NULL);
6218 gfc_copy_loopinfo_to_se (&lse, &loop);
6219 gfc_init_se (&rse, NULL);
6220 gfc_copy_loopinfo_to_se (&rse, &loop);
6221
6222 lse.ss = loop.temp_ss;
6223 rse.ss = ss;
6224
6225 gfc_conv_scalarized_array_ref (&lse, NULL);
6226 if (expr->ts.type == BT_CHARACTER)
6227 {
6228 gfc_conv_expr (&rse, expr);
6229 if (POINTER_TYPE_P (TREE_TYPE (rse.expr)))
6230 rse.expr = build_fold_indirect_ref_loc (input_location,
6231 rse.expr);
6232 }
6233 else
6234 gfc_conv_expr_val (&rse, expr);
6235
6236 gfc_add_block_to_block (&block, &rse.pre);
6237 gfc_add_block_to_block (&block, &lse.pre);
6238
6239 lse.string_length = rse.string_length;
6240 tmp = gfc_trans_scalar_assign (&lse, &rse, expr->ts, true,
6241 expr->expr_type == EXPR_VARIABLE
6242 || expr->expr_type == EXPR_ARRAY, true);
6243 gfc_add_expr_to_block (&block, tmp);
6244
6245 /* Finish the copying loops. */
6246 gfc_trans_scalarizing_loops (&loop, &block);
6247
6248 desc = loop.temp_ss->info->data.array.descriptor;
6249 }
6250 else if (expr->expr_type == EXPR_FUNCTION && !transposed_dims (ss))
6251 {
6252 desc = info->descriptor;
6253 se->string_length = ss_info->string_length;
6254 }
6255 else
6256 {
6257 /* We pass sections without copying to a temporary. Make a new
6258 descriptor and point it at the section we want. The loop variable
6259 limits will be the limits of the section.
6260 A function may decide to repack the array to speed up access, but
6261 we're not bothered about that here. */
6262 int dim, ndim, codim;
6263 tree parm;
6264 tree parmtype;
6265 tree stride;
6266 tree from;
6267 tree to;
6268 tree base;
6269
6270 ndim = info->ref ? info->ref->u.ar.dimen : ss->dimen;
6271
6272 if (se->want_coarray)
6273 {
6274 gfc_array_ref *ar = &info->ref->u.ar;
6275
6276 codim = gfc_get_corank (expr);
6277 for (n = 0; n < codim - 1; n++)
6278 {
6279 /* Make sure we are not lost somehow. */
6280 gcc_assert (ar->dimen_type[n + ndim] == DIMEN_THIS_IMAGE);
6281
6282 /* Make sure the call to gfc_conv_section_startstride won't
6283 generate unnecessary code to calculate stride. */
6284 gcc_assert (ar->stride[n + ndim] == NULL);
6285
6286 gfc_conv_section_startstride (&loop, ss, n + ndim);
6287 loop.from[n + loop.dimen] = info->start[n + ndim];
6288 loop.to[n + loop.dimen] = info->end[n + ndim];
6289 }
6290
6291 gcc_assert (n == codim - 1);
6292 evaluate_bound (&loop.pre, info->start, ar->start,
6293 info->descriptor, n + ndim, true);
6294 loop.from[n + loop.dimen] = info->start[n + ndim];
6295 }
6296 else
6297 codim = 0;
6298
6299 /* Set the string_length for a character array. */
6300 if (expr->ts.type == BT_CHARACTER)
6301 se->string_length = gfc_get_expr_charlen (expr);
6302
6303 desc = info->descriptor;
6304 if (se->direct_byref && !se->byref_noassign)
6305 {
6306 /* For pointer assignments we fill in the destination. */
6307 parm = se->expr;
6308 parmtype = TREE_TYPE (parm);
6309 }
6310 else
6311 {
6312 /* Otherwise make a new one. */
6313 parmtype = gfc_get_element_type (TREE_TYPE (desc));
6314 parmtype = gfc_get_array_type_bounds (parmtype, loop.dimen, codim,
6315 loop.from, loop.to, 0,
6316 GFC_ARRAY_UNKNOWN, false);
6317 parm = gfc_create_var (parmtype, "parm");
6318 }
6319
6320 offset = gfc_index_zero_node;
6321
6322 /* The following can be somewhat confusing. We have two
6323 descriptors, a new one and the original array.
6324 {parm, parmtype, dim} refer to the new one.
6325 {desc, type, n, loop} refer to the original, which may be
6326 a descriptorless array.
6327 The bounds of the scalarization are the bounds of the section.
6328 We don't have to worry about numeric overflows when calculating
6329 the offsets because all elements are within the array data. */
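/* E.g. for the section A(2:8:2) the new descriptor gets lbound 1,
ubound 4, stride 2 * stride(A,1), and its data pointer is set to
the address of A(2). */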
6330
6331 /* Set the dtype. */
6332 tmp = gfc_conv_descriptor_dtype (parm);
6333 gfc_add_modify (&loop.pre, tmp, gfc_get_dtype (parmtype));
6334
6335 /* For pointer assignments, set the offset to zero only if this is not
6336 a reference to the full array. */
6337 if (se->direct_byref
6338 && info->ref && info->ref->u.ar.type != AR_FULL)
6339 base = gfc_index_zero_node;
6340 else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6341 base = gfc_evaluate_now (gfc_conv_array_offset (desc), &loop.pre);
6342 else
6343 base = NULL_TREE;
6344
6345 for (n = 0; n < ndim; n++)
6346 {
6347 stride = gfc_conv_array_stride (desc, n);
6348
6349 /* Work out the offset. */
6350 if (info->ref
6351 && info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
6352 {
6353 gcc_assert (info->subscript[n]
6354 && info->subscript[n]->info->type == GFC_SS_SCALAR);
6355 start = info->subscript[n]->info->data.scalar.value;
6356 }
6357 else
6358 {
6359 /* Evaluate and remember the start of the section. */
6360 start = info->start[n];
6361 stride = gfc_evaluate_now (stride, &loop.pre);
6362 }
6363
6364 tmp = gfc_conv_array_lbound (desc, n);
6365 tmp = fold_build2_loc (input_location, MINUS_EXPR, TREE_TYPE (tmp),
6366 start, tmp);
6367 tmp = fold_build2_loc (input_location, MULT_EXPR, TREE_TYPE (tmp),
6368 tmp, stride);
6369 offset = fold_build2_loc (input_location, PLUS_EXPR, TREE_TYPE (tmp),
6370 offset, tmp);
6371
6372 if (info->ref
6373 && info->ref->u.ar.dimen_type[n] == DIMEN_ELEMENT)
6374 {
6375 /* For elemental dimensions, we only need the offset. */
6376 continue;
6377 }
6378
6379 /* Vector subscripts need copying and are handled elsewhere. */
6380 if (info->ref)
6381 gcc_assert (info->ref->u.ar.dimen_type[n] == DIMEN_RANGE);
6382
6383 /* Look for the corresponding scalarizer dimension: dim. */
6384 for (dim = 0; dim < ndim; dim++)
6385 if (ss->dim[dim] == n)
6386 break;
6387
6388 /* Loop exited early: the DIM being looked for has been found. */
6389 gcc_assert (dim < ndim);
6390
6391 /* Set the new lower bound. */
6392 from = loop.from[dim];
6393 to = loop.to[dim];
6394
6395 /* If we have an array section or are assigning, make sure that
6396 the lower bound is 1. References to the full
6397 array should otherwise keep the original bounds. */
6398 if ((!info->ref
6399 || info->ref->u.ar.type != AR_FULL)
6400 && !integer_onep (from))
6401 {
6402 tmp = fold_build2_loc (input_location, MINUS_EXPR,
6403 gfc_array_index_type, gfc_index_one_node,
6404 from);
6405 to = fold_build2_loc (input_location, PLUS_EXPR,
6406 gfc_array_index_type, to, tmp);
6407 from = gfc_index_one_node;
6408 }
6409 gfc_conv_descriptor_lbound_set (&loop.pre, parm,
6410 gfc_rank_cst[dim], from);
6411
6412 /* Set the new upper bound. */
6413 gfc_conv_descriptor_ubound_set (&loop.pre, parm,
6414 gfc_rank_cst[dim], to);
6415
6416 /* Multiply the stride by the section stride to get the
6417 total stride. */
6418 stride = fold_build2_loc (input_location, MULT_EXPR,
6419 gfc_array_index_type,
6420 stride, info->stride[n]);
6421
6422 if (se->direct_byref
6423 && info->ref
6424 && info->ref->u.ar.type != AR_FULL)
6425 {
6426 base = fold_build2_loc (input_location, MINUS_EXPR,
6427 TREE_TYPE (base), base, stride);
6428 }
6429 else if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6430 {
6431 tmp = gfc_conv_array_lbound (desc, n);
6432 tmp = fold_build2_loc (input_location, MINUS_EXPR,
6433 TREE_TYPE (base), tmp, loop.from[dim]);
6434 tmp = fold_build2_loc (input_location, MULT_EXPR,
6435 TREE_TYPE (base), tmp,
6436 gfc_conv_array_stride (desc, n));
6437 base = fold_build2_loc (input_location, PLUS_EXPR,
6438 TREE_TYPE (base), tmp, base);
6439 }
6440
6441 /* Store the new stride. */
6442 gfc_conv_descriptor_stride_set (&loop.pre, parm,
6443 gfc_rank_cst[dim], stride);
6444 }
6445
6446 for (n = loop.dimen; n < loop.dimen + codim; n++)
6447 {
6448 from = loop.from[n];
6449 to = loop.to[n];
6450 gfc_conv_descriptor_lbound_set (&loop.pre, parm,
6451 gfc_rank_cst[n], from);
6452 if (n < loop.dimen + codim - 1)
6453 gfc_conv_descriptor_ubound_set (&loop.pre, parm,
6454 gfc_rank_cst[n], to);
6455 }
6456
6457 if (se->data_not_needed)
6458 gfc_conv_descriptor_data_set (&loop.pre, parm,
6459 gfc_index_zero_node);
6460 else
6461 /* Point the data pointer at the 1st element in the section. */
6462 gfc_get_dataptr_offset (&loop.pre, parm, desc, offset,
6463 subref_array_target, expr);
6464
6465 if ((se->direct_byref || GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6466 && !se->data_not_needed)
6467 {
6468 /* Set the offset. */
6469 gfc_conv_descriptor_offset_set (&loop.pre, parm, base);
6470 }
6471 else
6472 {
6473 /* Only the callee knows what the correct offset is, so just set
6474 it to zero here. */
6475 gfc_conv_descriptor_offset_set (&loop.pre, parm, gfc_index_zero_node);
6476 }
6477 desc = parm;
6478 }
6479
6480 if (!se->direct_byref || se->byref_noassign)
6481 {
6482 /* Get a pointer to the new descriptor. */
6483 if (se->want_pointer)
6484 se->expr = gfc_build_addr_expr (NULL_TREE, desc);
6485 else
6486 se->expr = desc;
6487 }
6488
6489 gfc_add_block_to_block (&se->pre, &loop.pre);
6490 gfc_add_block_to_block (&se->post, &loop.post);
6491
6492 /* Cleanup the scalarizer. */
6493 gfc_cleanup_loop (&loop);
6494 }
6495
6496 /* Helper function for gfc_conv_array_parameter if array size needs to be
6497 computed. */
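/* The size computed below is in bytes: the number of elements of the
array times the size of one element. */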
6498
6499 static void
6500 array_parameter_size (tree desc, gfc_expr *expr, tree *size)
6501 {
6502 tree elem;
6503 if (GFC_ARRAY_TYPE_P (TREE_TYPE (desc)))
6504 *size = GFC_TYPE_ARRAY_SIZE (TREE_TYPE (desc));
6505 else if (expr->rank > 1)
6506 *size = build_call_expr_loc (input_location,
6507 gfor_fndecl_size0, 1,
6508 gfc_build_addr_expr (NULL, desc));
6509 else
6510 {
6511 tree ubound = gfc_conv_descriptor_ubound_get (desc, gfc_index_zero_node);
6512 tree lbound = gfc_conv_descriptor_lbound_get (desc, gfc_index_zero_node);
6513
6514 *size = fold_build2_loc (input_location, MINUS_EXPR,
6515 gfc_array_index_type, ubound, lbound);
6516 *size = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
6517 *size, gfc_index_one_node);
6518 *size = fold_build2_loc (input_location, MAX_EXPR, gfc_array_index_type,
6519 *size, gfc_index_zero_node);
6520 }
6521 elem = TYPE_SIZE_UNIT (gfc_get_element_type (TREE_TYPE (desc)));
6522 *size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
6523 *size, fold_convert (gfc_array_index_type, elem));
6524 }
6525
6526 /* Convert an array for passing as an actual parameter. */
6527 /* TODO: Optimize passing g77 arrays. */
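/* Here G77 true means that the called procedure does not expect a
descriptor, typically because the dummy is an explicit-shape or
assumed-size array, so only a contiguous data pointer is passed and
the array may have to be packed first. */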
6528
6529 void
6530 gfc_conv_array_parameter (gfc_se * se, gfc_expr * expr, gfc_ss * ss, bool g77,
6531 const gfc_symbol *fsym, const char *proc_name,
6532 tree *size)
6533 {
6534 tree ptr;
6535 tree desc;
6536 tree tmp = NULL_TREE;
6537 tree stmt;
6538 tree parent = DECL_CONTEXT (current_function_decl);
6539 bool full_array_var;
6540 bool this_array_result;
6541 bool contiguous;
6542 bool no_pack;
6543 bool array_constructor;
6544 bool good_allocatable;
6545 bool ultimate_ptr_comp;
6546 bool ultimate_alloc_comp;
6547 gfc_symbol *sym;
6548 stmtblock_t block;
6549 gfc_ref *ref;
6550
6551 ultimate_ptr_comp = false;
6552 ultimate_alloc_comp = false;
6553
6554 for (ref = expr->ref; ref; ref = ref->next)
6555 {
6556 if (ref->next == NULL)
6557 break;
6558
6559 if (ref->type == REF_COMPONENT)
6560 {
6561 ultimate_ptr_comp = ref->u.c.component->attr.pointer;
6562 ultimate_alloc_comp = ref->u.c.component->attr.allocatable;
6563 }
6564 }
6565
6566 full_array_var = false;
6567 contiguous = false;
6568
6569 if (expr->expr_type == EXPR_VARIABLE && ref && !ultimate_ptr_comp)
6570 full_array_var = gfc_full_array_ref_p (ref, &contiguous);
6571
6572 sym = full_array_var ? expr->symtree->n.sym : NULL;
6573
6574 /* The symbol should have an array specification. */
6575 gcc_assert (!sym || sym->as || ref->u.ar.as);
6576
6577 if (expr->expr_type == EXPR_ARRAY && expr->ts.type == BT_CHARACTER)
6578 {
6579 get_array_ctor_strlen (&se->pre, expr->value.constructor, &tmp);
6580 expr->ts.u.cl->backend_decl = tmp;
6581 se->string_length = tmp;
6582 }
6583
6584 /* Is this the result of the enclosing procedure? */
6585 this_array_result = (full_array_var && sym->attr.flavor == FL_PROCEDURE);
6586 if (this_array_result
6587 && (sym->backend_decl != current_function_decl)
6588 && (sym->backend_decl != parent))
6589 this_array_result = false;
6590
6591 /* Pass the address of the array if it is not a pointer or assumed-shape array. */
6592 if (full_array_var && g77 && !this_array_result)
6593 {
6594 tmp = gfc_get_symbol_decl (sym);
6595
6596 if (sym->ts.type == BT_CHARACTER)
6597 se->string_length = sym->ts.u.cl->backend_decl;
6598
6599 if (sym->ts.type == BT_DERIVED || sym->ts.type == BT_CLASS)
6600 {
6601 gfc_conv_expr_descriptor (se, expr, ss);
6602 se->expr = gfc_conv_array_data (se->expr);
6603 return;
6604 }
6605
6606 if (!sym->attr.pointer
6607 && sym->as
6608 && sym->as->type != AS_ASSUMED_SHAPE
6609 && !sym->attr.allocatable)
6610 {
6611 /* Some variables are declared directly, others are declared as
6612 pointers and allocated on the heap. */
6613 if (sym->attr.dummy || POINTER_TYPE_P (TREE_TYPE (tmp)))
6614 se->expr = tmp;
6615 else
6616 se->expr = gfc_build_addr_expr (NULL_TREE, tmp);
6617 if (size)
6618 array_parameter_size (tmp, expr, size);
6619 return;
6620 }
6621
6622 if (sym->attr.allocatable)
6623 {
6624 if (sym->attr.dummy || sym->attr.result)
6625 {
6626 gfc_conv_expr_descriptor (se, expr, ss);
6627 tmp = se->expr;
6628 }
6629 if (size)
6630 array_parameter_size (tmp, expr, size);
6631 se->expr = gfc_conv_array_data (tmp);
6632 return;
6633 }
6634 }
6635
6636 /* A convenient reduction in scope. */
6637 contiguous = g77 && !this_array_result && contiguous;
6638
6639 /* There is no need to pack and unpack the array if it is contiguous
6640 and not a deferred- or assumed-shape array, or if it is simply
6641 contiguous. */
6642 no_pack = ((sym && sym->as
6643 && !sym->attr.pointer
6644 && sym->as->type != AS_DEFERRED
6645 && sym->as->type != AS_ASSUMED_SHAPE)
6646 ||
6647 (ref && ref->u.ar.as
6648 && ref->u.ar.as->type != AS_DEFERRED
6649 && ref->u.ar.as->type != AS_ASSUMED_SHAPE)
6650 ||
6651 gfc_is_simply_contiguous (expr, false));
6652
6653 no_pack = contiguous && no_pack;
6654
6655 /* Array constructors are always contiguous and do not need packing. */
6656 array_constructor = g77 && !this_array_result && expr->expr_type == EXPR_ARRAY;
6657
6658 /* Same is true of contiguous sections from allocatable variables. */
6659 good_allocatable = contiguous
6660 && expr->symtree
6661 && expr->symtree->n.sym->attr.allocatable;
6662
6663 /* Or ultimate allocatable components. */
6664 ultimate_alloc_comp = contiguous && ultimate_alloc_comp;
6665
6666 if (no_pack || array_constructor || good_allocatable || ultimate_alloc_comp)
6667 {
6668 gfc_conv_expr_descriptor (se, expr, ss);
6669 if (expr->ts.type == BT_CHARACTER)
6670 se->string_length = expr->ts.u.cl->backend_decl;
6671 if (size)
6672 array_parameter_size (se->expr, expr, size);
6673 se->expr = gfc_conv_array_data (se->expr);
6674 return;
6675 }
6676
6677 if (this_array_result)
6678 {
6679 /* Result of the enclosing function. */
6680 gfc_conv_expr_descriptor (se, expr, ss);
6681 if (size)
6682 array_parameter_size (se->expr, expr, size);
6683 se->expr = gfc_build_addr_expr (NULL_TREE, se->expr);
6684
6685 if (g77 && TREE_TYPE (TREE_TYPE (se->expr)) != NULL_TREE
6686 && GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (TREE_TYPE (se->expr))))
6687 se->expr = gfc_conv_array_data (build_fold_indirect_ref_loc (input_location,
6688 se->expr));
6689
6690 return;
6691 }
6692 else
6693 {
6694 /* Every other type of array. */
6695 se->want_pointer = 1;
6696 gfc_conv_expr_descriptor (se, expr, ss);
6697 if (size)
6698 array_parameter_size (build_fold_indirect_ref_loc (input_location,
6699 se->expr),
6700 expr, size);
6701 }
6702
6703 /* Deallocate the allocatable components of structures that are
6704 not variable. */
6705 if ((expr->ts.type == BT_DERIVED || expr->ts.type == BT_CLASS)
6706 && expr->ts.u.derived->attr.alloc_comp
6707 && expr->expr_type != EXPR_VARIABLE)
6708 {
6709 tmp = build_fold_indirect_ref_loc (input_location, se->expr);
6710 tmp = gfc_deallocate_alloc_comp (expr->ts.u.derived, tmp, expr->rank);
6711
6712 /* The components shall be deallocated before their containing entity. */
6713 gfc_prepend_expr_to_block (&se->post, tmp);
6714 }
6715
6716 if (g77 || (fsym && fsym->attr.contiguous
6717 && !gfc_is_simply_contiguous (expr, false)))
6718 {
6719 tree origptr = NULL_TREE;
6720
6721 desc = se->expr;
6722
6723 /* For contiguous arrays, save the original value of the descriptor. */
6724 if (!g77)
6725 {
6726 origptr = gfc_create_var (pvoid_type_node, "origptr");
6727 tmp = build_fold_indirect_ref_loc (input_location, desc);
6728 tmp = gfc_conv_array_data (tmp);
6729 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
6730 TREE_TYPE (origptr), origptr,
6731 fold_convert (TREE_TYPE (origptr), tmp));
6732 gfc_add_expr_to_block (&se->pre, tmp);
6733 }
6734
6735 /* Repack the array. */
6736 if (gfc_option.warn_array_temp)
6737 {
6738 if (fsym)
6739 gfc_warning ("Creating array temporary at %L for argument '%s'",
6740 &expr->where, fsym->name);
6741 else
6742 gfc_warning ("Creating array temporary at %L", &expr->where);
6743 }
6744
6745 ptr = build_call_expr_loc (input_location,
6746 gfor_fndecl_in_pack, 1, desc);
6747
6748 if (fsym && fsym->attr.optional && sym && sym->attr.optional)
6749 {
6750 tmp = gfc_conv_expr_present (sym);
6751 ptr = build3_loc (input_location, COND_EXPR, TREE_TYPE (se->expr),
6752 tmp, fold_convert (TREE_TYPE (se->expr), ptr),
6753 fold_convert (TREE_TYPE (se->expr), null_pointer_node));
6754 }
6755
6756 ptr = gfc_evaluate_now (ptr, &se->pre);
6757
6758 /* Use the packed data for the actual argument, except for contiguous arrays,
6759 where the descriptor's data component is set. */
6760 if (g77)
6761 se->expr = ptr;
6762 else
6763 {
6764 tmp = build_fold_indirect_ref_loc (input_location, desc);
6765 gfc_conv_descriptor_data_set (&se->pre, tmp, ptr);
6766 }
6767
6768 if (gfc_option.rtcheck & GFC_RTCHECK_ARRAY_TEMPS)
6769 {
6770 char * msg;
6771
6772 if (fsym && proc_name)
6773 asprintf (&msg, "An array temporary was created for argument "
6774 "'%s' of procedure '%s'", fsym->name, proc_name);
6775 else
6776 asprintf (&msg, "An array temporary was created");
6777
6778 tmp = build_fold_indirect_ref_loc (input_location,
6779 desc);
6780 tmp = gfc_conv_array_data (tmp);
6781 tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
6782 fold_convert (TREE_TYPE (tmp), ptr), tmp);
6783
6784 if (fsym && fsym->attr.optional && sym && sym->attr.optional)
6785 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
6786 boolean_type_node,
6787 gfc_conv_expr_present (sym), tmp);
6788
6789 gfc_trans_runtime_check (false, true, tmp, &se->pre,
6790 &expr->where, msg);
6791 free (msg);
6792 }
6793
6794 gfc_start_block (&block);
6795
6796 /* Copy the data back. */
6797 if (fsym == NULL || fsym->attr.intent != INTENT_IN)
6798 {
6799 tmp = build_call_expr_loc (input_location,
6800 gfor_fndecl_in_unpack, 2, desc, ptr);
6801 gfc_add_expr_to_block (&block, tmp);
6802 }
6803
6804 /* Free the temporary. */
6805 tmp = gfc_call_free (convert (pvoid_type_node, ptr));
6806 gfc_add_expr_to_block (&block, tmp);
6807
6808 stmt = gfc_finish_block (&block);
6809
6810 gfc_init_block (&block);
6811 /* Only if it was repacked. This code needs to be executed before the
6812 loop cleanup code. */
6813 tmp = build_fold_indirect_ref_loc (input_location,
6814 desc);
6815 tmp = gfc_conv_array_data (tmp);
6816 tmp = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
6817 fold_convert (TREE_TYPE (tmp), ptr), tmp);
6818
6819 if (fsym && fsym->attr.optional && sym && sym->attr.optional)
6820 tmp = fold_build2_loc (input_location, TRUTH_AND_EXPR,
6821 boolean_type_node,
6822 gfc_conv_expr_present (sym), tmp);
6823
6824 tmp = build3_v (COND_EXPR, tmp, stmt, build_empty_stmt (input_location));
6825
6826 gfc_add_expr_to_block (&block, tmp);
6827 gfc_add_block_to_block (&block, &se->post);
6828
6829 gfc_init_block (&se->post);
6830
6831 /* Reset the descriptor pointer. */
6832 if (!g77)
6833 {
6834 tmp = build_fold_indirect_ref_loc (input_location, desc);
6835 gfc_conv_descriptor_data_set (&se->post, tmp, origptr);
6836 }
6837
6838 gfc_add_block_to_block (&se->post, &block);
6839 }
6840 }
6841
6842
6843 /* Generate code to deallocate an array, if it is allocated. */
6844
6845 tree
6846 gfc_trans_dealloc_allocated (tree descriptor)
6847 {
6848 tree tmp;
6849 tree var;
6850 stmtblock_t block;
6851
6852 gfc_start_block (&block);
6853
6854 var = gfc_conv_descriptor_data_get (descriptor);
6855 STRIP_NOPS (var);
6856
6857 /* Call array_deallocate with an int * present in the second argument.
6858 Although it is ignored here, its presence ensures that arrays that
6859 are already deallocated are ignored. */
6860 tmp = gfc_deallocate_with_status (var, NULL_TREE, true, NULL);
6861 gfc_add_expr_to_block (&block, tmp);
6862
6863 /* Zero the data pointer. */
6864 tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
6865 var, build_int_cst (TREE_TYPE (var), 0));
6866 gfc_add_expr_to_block (&block, tmp);
6867
6868 return gfc_finish_block (&block);
6869 }
6870
6871
6872 /* This helper function calculates the size (number of elements) of a full array. */
6873
6874 static tree
6875 get_full_array_size (stmtblock_t *block, tree decl, int rank)
6876 {
6877 tree idx;
6878 tree nelems;
6879 tree tmp;
6880 idx = gfc_rank_cst[rank - 1];
6881 nelems = gfc_conv_descriptor_ubound_get (decl, idx);
6882 tmp = gfc_conv_descriptor_lbound_get (decl, idx);
6883 tmp = fold_build2_loc (input_location, MINUS_EXPR, gfc_array_index_type,
6884 nelems, tmp);
6885 tmp = fold_build2_loc (input_location, PLUS_EXPR, gfc_array_index_type,
6886 tmp, gfc_index_one_node);
6887 tmp = gfc_evaluate_now (tmp, block);
6888
6889 nelems = gfc_conv_descriptor_stride_get (decl, idx);
6890 tmp = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
6891 nelems, tmp);
6892 return gfc_evaluate_now (tmp, block);
6893 }
6894
6895
6896 /* Allocate dest to the same size as src, and copy src -> dest.
6897 If no_malloc is set, only the copy is done. */
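/* For rank 0 the size is simply that of the scalar type; otherwise it
is the element count from get_full_array_size multiplied by the
element size. */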
6898
6899 static tree
6900 duplicate_allocatable (tree dest, tree src, tree type, int rank,
6901 bool no_malloc)
6902 {
6903 tree tmp;
6904 tree size;
6905 tree nelems;
6906 tree null_cond;
6907 tree null_data;
6908 stmtblock_t block;
6909
6910 /* If the source is null, set the destination to null. Otherwise,
6911 allocate memory for the destination and copy the data. */
6912 gfc_init_block (&block);
6913
6914 if (rank == 0)
6915 {
6916 tmp = null_pointer_node;
6917 tmp = fold_build2_loc (input_location, MODIFY_EXPR, type, dest, tmp);
6918 gfc_add_expr_to_block (&block, tmp);
6919 null_data = gfc_finish_block (&block);
6920
6921 gfc_init_block (&block);
6922 size = TYPE_SIZE_UNIT (TREE_TYPE (type));
6923 if (!no_malloc)
6924 {
6925 tmp = gfc_call_malloc (&block, type, size);
6926 tmp = fold_build2_loc (input_location, MODIFY_EXPR, void_type_node,
6927 dest, fold_convert (type, tmp));
6928 gfc_add_expr_to_block (&block, tmp);
6929 }
6930
6931 tmp = builtin_decl_explicit (BUILT_IN_MEMCPY);
6932 tmp = build_call_expr_loc (input_location, tmp, 3,
6933 dest, src, size);
6934 }
6935 else
6936 {
6937 gfc_conv_descriptor_data_set (&block, dest, null_pointer_node);
6938 null_data = gfc_finish_block (&block);
6939
6940 gfc_init_block (&block);
6941 nelems = get_full_array_size (&block, src, rank);
6942 tmp = fold_convert (gfc_array_index_type,
6943 TYPE_SIZE_UNIT (gfc_get_element_type (type)));
6944 size = fold_build2_loc (input_location, MULT_EXPR, gfc_array_index_type,
6945 nelems, tmp);
6946 if (!no_malloc)
6947 {
6948 tmp = TREE_TYPE (gfc_conv_descriptor_data_get (src));
6949 tmp = gfc_call_malloc (&block, tmp, size);
6950 gfc_conv_descriptor_data_set (&block, dest, tmp);
6951 }
6952
6953 /* We know the temporary and the value will be the same length,
6954 so we can use memcpy. */
6955 tmp = builtin_decl_explicit (BUILT_IN_MEMCPY);
6956 tmp = build_call_expr_loc (input_location,
6957 tmp, 3, gfc_conv_descriptor_data_get (dest),
6958 gfc_conv_descriptor_data_get (src), size);
6959 }
6960
6961 gfc_add_expr_to_block (&block, tmp);
6962 tmp = gfc_finish_block (&block);
6963
6964 /* Null the destination if the source is null; otherwise do
6965 the allocate and copy. */
6966 if (rank == 0)
6967 null_cond = src;
6968 else
6969 null_cond = gfc_conv_descriptor_data_get (src);
6970
6971 null_cond = convert (pvoid_type_node, null_cond);
6972 null_cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
6973 null_cond, null_pointer_node);
6974 return build3_v (COND_EXPR, null_cond, tmp, null_data);
6975 }
6976
6977
6978 /* Allocate dest to the same size as src, and copy data src -> dest. */
6979
6980 tree
6981 gfc_duplicate_allocatable (tree dest, tree src, tree type, int rank)
6982 {
6983 return duplicate_allocatable (dest, src, type, rank, false);
6984 }
6985
6986
6987 /* Copy data src -> dest. */
6988
6989 tree
6990 gfc_copy_allocatable_data (tree dest, tree src, tree type, int rank)
6991 {
6992 return duplicate_allocatable (dest, src, type, rank, true);
6993 }
6994
6995
6996 /* Recursively traverse an object of derived type, generating code to
6997 deallocate, nullify or copy allocatable components. This is the workhorse
6998 function for the functions named in this enum. */
6999
7000 enum {DEALLOCATE_ALLOC_COMP = 1, NULLIFY_ALLOC_COMP, COPY_ALLOC_COMP,
7001 COPY_ONLY_ALLOC_COMP};
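/* E.g. for

type :: t
real, allocatable :: x(:)
end type
type(t) :: a(10)

deallocating the allocatable components of A requires a loop over its
elements, deallocating each A(i)%X in turn; the loop is built here and
the per-component handling is done by recursion. */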
7002
7003 static tree
7004 structure_alloc_comps (gfc_symbol * der_type, tree decl,
7005 tree dest, int rank, int purpose)
7006 {
7007 gfc_component *c;
7008 gfc_loopinfo loop;
7009 stmtblock_t fnblock;
7010 stmtblock_t loopbody;
7011 tree decl_type;
7012 tree tmp;
7013 tree comp;
7014 tree dcmp;
7015 tree nelems;
7016 tree index;
7017 tree var;
7018 tree cdecl;
7019 tree ctype;
7020 tree vref, dref;
7021 tree null_cond = NULL_TREE;
7022
7023 gfc_init_block (&fnblock);
7024
7025 decl_type = TREE_TYPE (decl);
7026
7027 if ((POINTER_TYPE_P (decl_type) && rank != 0)
7028 || (TREE_CODE (decl_type) == REFERENCE_TYPE && rank == 0))
7029
7030 decl = build_fold_indirect_ref_loc (input_location,
7031 decl);
7032
7033 /* Just in case it gets dereferenced. */
7034 decl_type = TREE_TYPE (decl);
7035
7036 /* If this is an array of derived types with allocatable components,
7037 build a loop and recursively call this function. */
7038 if (TREE_CODE (decl_type) == ARRAY_TYPE
7039 || GFC_DESCRIPTOR_TYPE_P (decl_type))
7040 {
7041 tmp = gfc_conv_array_data (decl);
7042 var = build_fold_indirect_ref_loc (input_location,
7043 tmp);
7044
7045 /* Get the number of elements - 1 and set the counter. */
7046 if (GFC_DESCRIPTOR_TYPE_P (decl_type))
7047 {
7048 /* Use the descriptor for an allocatable array. Since this
7049 is a full array reference, we only need the descriptor
7050 information from dimension = rank. */
7051 tmp = get_full_array_size (&fnblock, decl, rank);
7052 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7053 gfc_array_index_type, tmp,
7054 gfc_index_one_node);
7055
7056 null_cond = gfc_conv_descriptor_data_get (decl);
7057 null_cond = fold_build2_loc (input_location, NE_EXPR,
7058 boolean_type_node, null_cond,
7059 build_int_cst (TREE_TYPE (null_cond), 0));
7060 }
7061 else
7062 {
7063 /* Otherwise use the TYPE_DOMAIN information. */
7064 tmp = array_type_nelts (decl_type);
7065 tmp = fold_convert (gfc_array_index_type, tmp);
7066 }
7067
7068 /* Remember that this is, in fact, the no. of elements - 1. */
7069 nelems = gfc_evaluate_now (tmp, &fnblock);
7070 index = gfc_create_var (gfc_array_index_type, "S");
7071
7072 /* Build the body of the loop. */
7073 gfc_init_block (&loopbody);
7074
7075 vref = gfc_build_array_ref (var, index, NULL);
7076
7077 if (purpose == COPY_ALLOC_COMP)
7078 {
7079 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (dest)))
7080 {
7081 tmp = gfc_duplicate_allocatable (dest, decl, decl_type, rank);
7082 gfc_add_expr_to_block (&fnblock, tmp);
7083 }
7084 tmp = build_fold_indirect_ref_loc (input_location,
7085 gfc_conv_array_data (dest));
7086 dref = gfc_build_array_ref (tmp, index, NULL);
7087 tmp = structure_alloc_comps (der_type, vref, dref, rank, purpose);
7088 }
7089 else if (purpose == COPY_ONLY_ALLOC_COMP)
7090 {
7091 tmp = build_fold_indirect_ref_loc (input_location,
7092 gfc_conv_array_data (dest));
7093 dref = gfc_build_array_ref (tmp, index, NULL);
7094 tmp = structure_alloc_comps (der_type, vref, dref, rank,
7095 COPY_ALLOC_COMP);
7096 }
7097 else
7098 tmp = structure_alloc_comps (der_type, vref, NULL_TREE, rank, purpose);
7099
7100 gfc_add_expr_to_block (&loopbody, tmp);
7101
7102 /* Build the loop and return. */
7103 gfc_init_loopinfo (&loop);
7104 loop.dimen = 1;
7105 loop.from[0] = gfc_index_zero_node;
7106 loop.loopvar[0] = index;
7107 loop.to[0] = nelems;
7108 gfc_trans_scalarizing_loops (&loop, &loopbody);
7109 gfc_add_block_to_block (&fnblock, &loop.pre);
7110
7111 tmp = gfc_finish_block (&fnblock);
7112 if (null_cond != NULL_TREE)
7113 tmp = build3_v (COND_EXPR, null_cond, tmp,
7114 build_empty_stmt (input_location));
7115
7116 return tmp;
7117 }
7118
7119 /* Otherwise, act on the components or recursively call self to
7120 act on a chain of components. */
7121 for (c = der_type->components; c; c = c->next)
7122 {
7123 bool cmp_has_alloc_comps = (c->ts.type == BT_DERIVED
7124 || c->ts.type == BT_CLASS)
7125 && c->ts.u.derived->attr.alloc_comp;
7126 cdecl = c->backend_decl;
7127 ctype = TREE_TYPE (cdecl);
7128
7129 switch (purpose)
7130 {
7131 case DEALLOCATE_ALLOC_COMP:
7132 if (cmp_has_alloc_comps && !c->attr.pointer)
7133 {
7134 /* Do not deallocate the components of ultimate pointer
7135 components. */
7136 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7137 decl, cdecl, NULL_TREE);
7138 rank = c->as ? c->as->rank : 0;
7139 tmp = structure_alloc_comps (c->ts.u.derived, comp, NULL_TREE,
7140 rank, purpose);
7141 gfc_add_expr_to_block (&fnblock, tmp);
7142 }
7143
7144 if (c->attr.allocatable
7145 && (c->attr.dimension || c->attr.codimension))
7146 {
7147 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7148 decl, cdecl, NULL_TREE);
7149 tmp = gfc_trans_dealloc_allocated (comp);
7150 gfc_add_expr_to_block (&fnblock, tmp);
7151 }
7152 else if (c->attr.allocatable)
7153 {
7154 /* Allocatable scalar components. */
7155 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7156 decl, cdecl, NULL_TREE);
7157
7158 tmp = gfc_deallocate_scalar_with_status (comp, NULL, true, NULL,
7159 c->ts);
7160 gfc_add_expr_to_block (&fnblock, tmp);
7161
7162 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
7163 void_type_node, comp,
7164 build_int_cst (TREE_TYPE (comp), 0));
7165 gfc_add_expr_to_block (&fnblock, tmp);
7166 }
7167 else if (c->ts.type == BT_CLASS && CLASS_DATA (c)->attr.allocatable)
7168 {
7169 /* Allocatable scalar CLASS components. */
7170 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7171 decl, cdecl, NULL_TREE);
7172
7173 /* Add reference to '_data' component. */
7174 tmp = CLASS_DATA (c)->backend_decl;
7175 comp = fold_build3_loc (input_location, COMPONENT_REF,
7176 TREE_TYPE (tmp), comp, tmp, NULL_TREE);
7177
7178 tmp = gfc_deallocate_scalar_with_status (comp, NULL, true, NULL,
7179 CLASS_DATA (c)->ts);
7180 gfc_add_expr_to_block (&fnblock, tmp);
7181
7182 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
7183 void_type_node, comp,
7184 build_int_cst (TREE_TYPE (comp), 0));
7185 gfc_add_expr_to_block (&fnblock, tmp);
7186 }
7187 break;
7188
7189 case NULLIFY_ALLOC_COMP:
7190 if (c->attr.pointer)
7191 continue;
7192 else if (c->attr.allocatable
7193 && (c->attr.dimension|| c->attr.codimension))
7194 {
7195 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7196 decl, cdecl, NULL_TREE);
7197 gfc_conv_descriptor_data_set (&fnblock, comp, null_pointer_node);
7198 }
7199 else if (c->attr.allocatable)
7200 {
7201 /* Allocatable scalar components. */
7202 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7203 decl, cdecl, NULL_TREE);
7204 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
7205 void_type_node, comp,
7206 build_int_cst (TREE_TYPE (comp), 0));
7207 gfc_add_expr_to_block (&fnblock, tmp);
7208 }
7209 else if (c->ts.type == BT_CLASS && CLASS_DATA (c)->attr.allocatable)
7210 {
7211 /* Allocatable scalar CLASS components. */
7212 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7213 decl, cdecl, NULL_TREE);
7214 /* Add reference to '_data' component. */
7215 tmp = CLASS_DATA (c)->backend_decl;
7216 comp = fold_build3_loc (input_location, COMPONENT_REF,
7217 TREE_TYPE (tmp), comp, tmp, NULL_TREE);
7218 tmp = fold_build2_loc (input_location, MODIFY_EXPR,
7219 void_type_node, comp,
7220 build_int_cst (TREE_TYPE (comp), 0));
7221 gfc_add_expr_to_block (&fnblock, tmp);
7222 }
7223 else if (cmp_has_alloc_comps)
7224 {
7225 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype,
7226 decl, cdecl, NULL_TREE);
7227 rank = c->as ? c->as->rank : 0;
7228 tmp = structure_alloc_comps (c->ts.u.derived, comp, NULL_TREE,
7229 rank, purpose);
7230 gfc_add_expr_to_block (&fnblock, tmp);
7231 }
7232 break;
7233
7234 case COPY_ALLOC_COMP:
7235 if (c->attr.pointer)
7236 continue;
7237
7238 /* We need source and destination components. */
7239 comp = fold_build3_loc (input_location, COMPONENT_REF, ctype, decl,
7240 cdecl, NULL_TREE);
7241 dcmp = fold_build3_loc (input_location, COMPONENT_REF, ctype, dest,
7242 cdecl, NULL_TREE);
7243 dcmp = fold_convert (TREE_TYPE (comp), dcmp);
7244
7245 if (c->attr.allocatable && !cmp_has_alloc_comps)
7246 {
7247 rank = c->as ? c->as->rank : 0;
7248 tmp = gfc_duplicate_allocatable (dcmp, comp, ctype, rank);
7249 gfc_add_expr_to_block (&fnblock, tmp);
7250 }
7251
7252 if (cmp_has_alloc_comps)
7253 {
7254 rank = c->as ? c->as->rank : 0;
7255 tmp = fold_convert (TREE_TYPE (dcmp), comp);
7256 gfc_add_modify (&fnblock, dcmp, tmp);
7257 tmp = structure_alloc_comps (c->ts.u.derived, comp, dcmp,
7258 rank, purpose);
7259 gfc_add_expr_to_block (&fnblock, tmp);
7260 }
7261 break;
7262
7263 default:
7264 gcc_unreachable ();
7265 break;
7266 }
7267 }
7268
7269 return gfc_finish_block (&fnblock);
7270 }
7271
7272 /* Recursively traverse an object of derived type, generating code to
7273 nullify allocatable components. */
7274
7275 tree
7276 gfc_nullify_alloc_comp (gfc_symbol * der_type, tree decl, int rank)
7277 {
7278 return structure_alloc_comps (der_type, decl, NULL_TREE, rank,
7279 NULLIFY_ALLOC_COMP);
7280 }
7281
7282
7283 /* Recursively traverse an object of derived type, generating code to
7284 deallocate allocatable components. */
7285
7286 tree
7287 gfc_deallocate_alloc_comp (gfc_symbol * der_type, tree decl, int rank)
7288 {
7289 return structure_alloc_comps (der_type, decl, NULL_TREE, rank,
7290 DEALLOCATE_ALLOC_COMP);
7291 }
7292
7293
7294 /* Recursively traverse an object of derived type, generating code to
7295 copy it and its allocatable components. */
7296
7297 tree
7298 gfc_copy_alloc_comp (gfc_symbol * der_type, tree decl, tree dest, int rank)
7299 {
7300 return structure_alloc_comps (der_type, decl, dest, rank, COPY_ALLOC_COMP);
7301 }
7302
7303
7304 /* Recursively traverse an object of derived type, generating code to
7305 copy only its allocatable components. */
7306
7307 tree
7308 gfc_copy_only_alloc_comp (gfc_symbol * der_type, tree decl, tree dest, int rank)
7309 {
7310 return structure_alloc_comps (der_type, decl, dest, rank, COPY_ONLY_ALLOC_COMP);
7311 }
7312
7313
7314 /* Returns the value of LBOUND for an expression. This could be broken out
7315 from gfc_conv_intrinsic_bound but this seemed to be simpler. This is
7316 called by gfc_alloc_allocatable_for_assignment. */
7317 static tree
7318 get_std_lbound (gfc_expr *expr, tree desc, int dim, bool assumed_size)
7319 {
7320 tree lbound;
7321 tree ubound;
7322 tree stride;
7323 tree cond, cond1, cond3, cond4;
7324 tree tmp;
7325 gfc_ref *ref;
7326
7327 if (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)))
7328 {
7329 tmp = gfc_rank_cst[dim];
7330 lbound = gfc_conv_descriptor_lbound_get (desc, tmp);
7331 ubound = gfc_conv_descriptor_ubound_get (desc, tmp);
7332 stride = gfc_conv_descriptor_stride_get (desc, tmp);
7333 cond1 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
7334 ubound, lbound);
7335 cond3 = fold_build2_loc (input_location, GE_EXPR, boolean_type_node,
7336 stride, gfc_index_zero_node);
7337 cond3 = fold_build2_loc (input_location, TRUTH_AND_EXPR,
7338 boolean_type_node, cond3, cond1);
7339 cond4 = fold_build2_loc (input_location, LT_EXPR, boolean_type_node,
7340 stride, gfc_index_zero_node);
7341 if (assumed_size)
7342 cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
7343 tmp, build_int_cst (gfc_array_index_type,
7344 expr->rank - 1));
7345 else
7346 cond = boolean_false_node;
7347
7348 cond1 = fold_build2_loc (input_location, TRUTH_OR_EXPR,
7349 boolean_type_node, cond3, cond4);
7350 cond = fold_build2_loc (input_location, TRUTH_OR_EXPR,
7351 boolean_type_node, cond, cond1);
7352
7353 return fold_build3_loc (input_location, COND_EXPR,
7354 gfc_array_index_type, cond,
7355 lbound, gfc_index_one_node);
7356 }
7357 else if (expr->expr_type == EXPR_VARIABLE)
7358 {
7359 tmp = TREE_TYPE (expr->symtree->n.sym->backend_decl);
7360 for (ref = expr->ref; ref; ref = ref->next)
7361 {
7362 if (ref->type == REF_COMPONENT
7363 && ref->u.c.component->as
7364 && ref->next
7365 && ref->next->u.ar.type == AR_FULL)
7366 tmp = TREE_TYPE (ref->u.c.component->backend_decl);
7367 }
7368 return GFC_TYPE_ARRAY_LBOUND(tmp, dim);
7369 }
7370 else if (expr->expr_type == EXPR_FUNCTION)
7371 {
7372 /* A conversion function, so use the argument. */
7373 expr = expr->value.function.actual->expr;
7374 if (expr->expr_type != EXPR_VARIABLE)
7375 return gfc_index_one_node;
7376 desc = TREE_TYPE (expr->symtree->n.sym->backend_decl);
7377 return get_std_lbound (expr, desc, dim, assumed_size);
7378 }
7379
7380 return gfc_index_one_node;
7381 }
7382
7383
7384 /* Returns true if an expression represents an lhs that can be reallocated
7385 on assignment. */
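/* Typical cases, for illustration (T and its allocatable array component B
   are hypothetical names):

       real, allocatable :: a(:)
       type(t) :: d

       a = [1.0, 2.0]       ! true: full reference of an allocatable array
       d%b = [1.0, 2.0]     ! true: allocatable component with a full array ref
       a(1:2) = 0.0         ! false: a section is never reallocated  */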
7386
7387 bool
7388 gfc_is_reallocatable_lhs (gfc_expr *expr)
7389 {
7390 gfc_ref * ref;
7391
7392 if (!expr->ref)
7393 return false;
7394
7395 /* An allocatable variable. */
7396 if (expr->symtree->n.sym->attr.allocatable
7397 && expr->ref
7398 && expr->ref->type == REF_ARRAY
7399 && expr->ref->u.ar.type == AR_FULL)
7400 return true;
7401
7402 /* All that can be left are allocatable components. */
7403 if ((expr->symtree->n.sym->ts.type != BT_DERIVED
7404 && expr->symtree->n.sym->ts.type != BT_CLASS)
7405 || !expr->symtree->n.sym->ts.u.derived->attr.alloc_comp)
7406 return false;
7407
7408 /* Find a component ref followed by an array reference. */
7409 for (ref = expr->ref; ref; ref = ref->next)
7410 if (ref->next
7411 && ref->type == REF_COMPONENT
7412 && ref->next->type == REF_ARRAY
7413 && !ref->next->next)
7414 break;
7415
7416 if (!ref)
7417 return false;
7418
7419 /* Return true if valid reallocatable lhs. */
7420 if (ref->u.c.component->attr.allocatable
7421 && ref->next->u.ar.type == AR_FULL)
7422 return true;
7423
7424 return false;
7425 }
7426
7427
7428 /* Allocate the allocatable array on the lhs of an assignment if it is
7429 unallocated; otherwise reallocate it as required. */
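/* A sketch of the Fortran 2003 semantics this implements (reallocation on
   assignment, controlled in gfortran by -frealloc-lhs):

       integer, allocatable :: a(:)
       a = [1, 2, 3]    ! unallocated: allocated with shape [3], lbound 1
       a = [4, 5]       ! shape differs: bounds reset and storage realloc'd
       a = a + 1        ! same shape: no (re)allocation needed  */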
7430
7431 tree
7432 gfc_alloc_allocatable_for_assignment (gfc_loopinfo *loop,
7433 gfc_expr *expr1,
7434 gfc_expr *expr2)
7435 {
7436 stmtblock_t realloc_block;
7437 stmtblock_t alloc_block;
7438 stmtblock_t fblock;
7439 gfc_ss *rss;
7440 gfc_ss *lss;
7441 gfc_array_info *linfo;
7442 tree realloc_expr;
7443 tree alloc_expr;
7444 tree size1;
7445 tree size2;
7446 tree array1;
7447 tree cond;
7448 tree tmp;
7449 tree tmp2;
7450 tree lbound;
7451 tree ubound;
7452 tree desc;
7453 tree desc2;
7454 tree offset;
7455 tree jump_label1;
7456 tree jump_label2;
7457 tree neq_size;
7458 tree lbd;
7459 int n;
7460 int dim;
7461 gfc_array_spec * as;
7462
7463 /* x = f(...) with x allocatable. In this case, expr1 is the rhs.
7464 Find the lhs expression in the loop chain and set expr1 and
7465 expr2 accordingly. */
7466 if (expr1->expr_type == EXPR_FUNCTION && expr2 == NULL)
7467 {
7468 expr2 = expr1;
7469 /* Find the ss for the lhs. */
7470 lss = loop->ss;
7471 for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
7472 if (lss->info->expr && lss->info->expr->expr_type == EXPR_VARIABLE)
7473 break;
7474 if (lss == gfc_ss_terminator)
7475 return NULL_TREE;
7476 expr1 = lss->info->expr;
7477 }
7478
7479 /* Bail out if this is not a valid allocate on assignment. */
7480 if (!gfc_is_reallocatable_lhs (expr1)
7481 || (expr2 && !expr2->rank))
7482 return NULL_TREE;
7483
7484 /* Find the ss for the lhs. */
7485 lss = loop->ss;
7486 for (; lss && lss != gfc_ss_terminator; lss = lss->loop_chain)
7487 if (lss->info->expr == expr1)
7488 break;
7489
7490 if (lss == gfc_ss_terminator)
7491 return NULL_TREE;
7492
7493 linfo = &lss->info->data.array;
7494
7495 /* Find an ss for the rhs. For operator expressions, we only see the
7496 ss entries for the operands; any one of these will do. */
7497 rss = loop->ss;
7498 for (; rss && rss != gfc_ss_terminator; rss = rss->loop_chain)
7499 if (rss->info->expr != expr1 && rss != loop->temp_ss)
7500 break;
7501
7502 if (expr2 && rss == gfc_ss_terminator)
7503 return NULL_TREE;
7504
7505 gfc_start_block (&fblock);
7506
7507 /* Since the lhs is allocatable, this must be a descriptor type.
7508 Get the data and array size. */
7509 desc = linfo->descriptor;
7510 gcc_assert (GFC_DESCRIPTOR_TYPE_P (TREE_TYPE (desc)));
7511 array1 = gfc_conv_descriptor_data_get (desc);
7512
7513 /* 7.4.1.3 "If variable is an allocated allocatable variable, it is
7514 deallocated if expr is an array of different shape or any of the
7515 corresponding length type parameter values of variable and expr
7516 differ." This ensures F95 compatibility. */
7517 jump_label1 = gfc_build_label_decl (NULL_TREE);
7518 jump_label2 = gfc_build_label_decl (NULL_TREE);
7519
7520 /* Allocate if data is NULL. */
7521 cond = fold_build2_loc (input_location, EQ_EXPR, boolean_type_node,
7522 array1, build_int_cst (TREE_TYPE (array1), 0));
7523 tmp = build3_v (COND_EXPR, cond,
7524 build1_v (GOTO_EXPR, jump_label1),
7525 build_empty_stmt (input_location));
7526 gfc_add_expr_to_block (&fblock, tmp);
7527
7528 /* Get arrayspec if expr is a full array. */
7529 if (expr2 && expr2->expr_type == EXPR_FUNCTION
7530 && expr2->value.function.isym
7531 && expr2->value.function.isym->conversion)
7532 {
7533 /* For conversion functions, take the arg. */
7534 gfc_expr *arg = expr2->value.function.actual->expr;
7535 as = gfc_get_full_arrayspec_from_expr (arg);
7536 }
7537 else if (expr2)
7538 as = gfc_get_full_arrayspec_from_expr (expr2);
7539 else
7540 as = NULL;
7541
7542 /* If the lhs shape is not the same as the rhs, jump to setting the
7543 bounds and doing the (re)allocation ... */
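/* Reading of the check below: the rhs extent in dimension n is
   loop->to[n] - loop->from[n] + 1 and the lhs extent is ubound - lbound + 1,
   so the two match exactly when (to - from) + lbound - ubound == 0, which
   is what is compared against gfc_index_zero_node.  */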
7544 for (n = 0; n < expr1->rank; n++)
7545 {
7546 /* Check the shape. */
7547 lbound = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
7548 ubound = gfc_conv_descriptor_ubound_get (desc, gfc_rank_cst[n]);
7549 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7550 gfc_array_index_type,
7551 loop->to[n], loop->from[n]);
7552 tmp = fold_build2_loc (input_location, PLUS_EXPR,
7553 gfc_array_index_type,
7554 tmp, lbound);
7555 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7556 gfc_array_index_type,
7557 tmp, ubound);
7558 cond = fold_build2_loc (input_location, NE_EXPR,
7559 boolean_type_node,
7560 tmp, gfc_index_zero_node);
7561 tmp = build3_v (COND_EXPR, cond,
7562 build1_v (GOTO_EXPR, jump_label1),
7563 build_empty_stmt (input_location));
7564 gfc_add_expr_to_block (&fblock, tmp);
7565 }
7566
7567 /* ... else jump past the (re)alloc code. */
7568 tmp = build1_v (GOTO_EXPR, jump_label2);
7569 gfc_add_expr_to_block (&fblock, tmp);
7570
7571 /* Add the label to start automatic (re)allocation. */
7572 tmp = build1_v (LABEL_EXPR, jump_label1);
7573 gfc_add_expr_to_block (&fblock, tmp);
7574
7575 size1 = gfc_conv_descriptor_size (desc, expr1->rank);
7576
7577 /* Get the rhs size and evaluate both sizes now, so that they can be compared. */
7578 if (expr2)
7579 desc2 = rss->info->data.array.descriptor;
7580 else
7581 desc2 = NULL_TREE;
7582 size2 = gfc_index_one_node;
7583 for (n = 0; n < expr2->rank; n++)
7584 {
7585 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7586 gfc_array_index_type,
7587 loop->to[n], loop->from[n]);
7588 tmp = fold_build2_loc (input_location, PLUS_EXPR,
7589 gfc_array_index_type,
7590 tmp, gfc_index_one_node);
7591 size2 = fold_build2_loc (input_location, MULT_EXPR,
7592 gfc_array_index_type,
7593 tmp, size2);
7594 }
7595
7596 size1 = gfc_evaluate_now (size1, &fblock);
7597 size2 = gfc_evaluate_now (size2, &fblock);
7598
7599 cond = fold_build2_loc (input_location, NE_EXPR, boolean_type_node,
7600 size1, size2);
7601 neq_size = gfc_evaluate_now (cond, &fblock);
7602
7603
7604 /* Now modify the lhs descriptor and the associated scalarizer
7605 variables. F2003 7.4.1.3: "If variable is or becomes an
7606 unallocated allocatable variable, then it is allocated with each
7607 deferred type parameter equal to the corresponding type parameter
7608 of expr, with the shape of expr, and with each lower bound equal
7609 to the corresponding element of LBOUND(expr)."
7610 Reuse size1 to keep a dimension-by-dimension track of the
7611 stride of the new array. */
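/* Worked example (illustrative): for a rank-2 rhs with extents 3 and 4 and
   lower bounds of one, the loop below sets

       dim 0: lbound 1, ubound 3, stride 1
       dim 1: lbound 1, ubound 4, stride 3

   and accumulates offset = 0 - 1*1 - 1*3 = -4, so that element (1,1) maps
   to data[-4 + 1*1 + 1*3] = data[0].  */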
7612 size1 = gfc_index_one_node;
7613 offset = gfc_index_zero_node;
7614
7615 for (n = 0; n < expr2->rank; n++)
7616 {
7617 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7618 gfc_array_index_type,
7619 loop->to[n], loop->from[n]);
7620 tmp = fold_build2_loc (input_location, PLUS_EXPR,
7621 gfc_array_index_type,
7622 tmp, gfc_index_one_node);
7623
7624 lbound = gfc_index_one_node;
7625 ubound = tmp;
7626
7627 if (as)
7628 {
7629 lbd = get_std_lbound (expr2, desc2, n,
7630 as->type == AS_ASSUMED_SIZE);
7631 ubound = fold_build2_loc (input_location,
7632 MINUS_EXPR,
7633 gfc_array_index_type,
7634 ubound, lbound);
7635 ubound = fold_build2_loc (input_location,
7636 PLUS_EXPR,
7637 gfc_array_index_type,
7638 ubound, lbd);
7639 lbound = lbd;
7640 }
7641
7642 gfc_conv_descriptor_lbound_set (&fblock, desc,
7643 gfc_rank_cst[n],
7644 lbound);
7645 gfc_conv_descriptor_ubound_set (&fblock, desc,
7646 gfc_rank_cst[n],
7647 ubound);
7648 gfc_conv_descriptor_stride_set (&fblock, desc,
7649 gfc_rank_cst[n],
7650 size1);
7651 lbound = gfc_conv_descriptor_lbound_get (desc,
7652 gfc_rank_cst[n]);
7653 tmp2 = fold_build2_loc (input_location, MULT_EXPR,
7654 gfc_array_index_type,
7655 lbound, size1);
7656 offset = fold_build2_loc (input_location, MINUS_EXPR,
7657 gfc_array_index_type,
7658 offset, tmp2);
7659 size1 = fold_build2_loc (input_location, MULT_EXPR,
7660 gfc_array_index_type,
7661 tmp, size1);
7662 }
7663
7664 /* Set the lhs descriptor and scalarizer offsets. For rank > 1,
7665 the scalarizer keeps the array offset in saved_offset and uses
7666 info.offset as a running offset, so update saved_offset here too. */
7667 tmp = gfc_conv_descriptor_offset (desc);
7668 gfc_add_modify (&fblock, tmp, offset);
7669 if (linfo->saved_offset
7670 && TREE_CODE (linfo->saved_offset) == VAR_DECL)
7671 gfc_add_modify (&fblock, linfo->saved_offset, tmp);
7672
7673 /* Now set the deltas for the lhs. */
7674 for (n = 0; n < expr1->rank; n++)
7675 {
7676 tmp = gfc_conv_descriptor_lbound_get (desc, gfc_rank_cst[n]);
7677 dim = lss->dim[n];
7678 tmp = fold_build2_loc (input_location, MINUS_EXPR,
7679 gfc_array_index_type, tmp,
7680 loop->from[dim]);
7681 if (linfo->delta[dim]
7682 && TREE_CODE (linfo->delta[dim]) == VAR_DECL)
7683 gfc_add_modify (&fblock, linfo->delta[dim], tmp);
7684 }
7685
7686 /* Get the new lhs size in bytes. */
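/* Roughly, the byte size is the element size times the number of elements
   already accumulated in size2; for a deferred-length character lhs the
   element size comes from the rhs string length, which is first copied
   into expr1's string-length backend decl.  */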
7687 if (expr1->ts.type == BT_CHARACTER && expr1->ts.deferred)
7688 {
7689 tmp = expr2->ts.u.cl->backend_decl;
7690 gcc_assert (expr1->ts.u.cl->backend_decl);
7691 tmp = fold_convert (TREE_TYPE (expr1->ts.u.cl->backend_decl), tmp);
7692 gfc_add_modify (&fblock, expr1->ts.u.cl->backend_decl, tmp);
7693 }
7694 else if (expr1->ts.type == BT_CHARACTER && expr1->ts.u.cl->backend_decl)
7695 {
7696 tmp = TYPE_SIZE_UNIT (TREE_TYPE (gfc_typenode_for_spec (&expr1->ts)));
7697 tmp = fold_build2_loc (input_location, MULT_EXPR,
7698 gfc_array_index_type, tmp,
7699 expr1->ts.u.cl->backend_decl);
7700 }
7701 else
7702 tmp = TYPE_SIZE_UNIT (gfc_typenode_for_spec (&expr1->ts));
7703 tmp = fold_convert (gfc_array_index_type, tmp);
7704 size2 = fold_build2_loc (input_location, MULT_EXPR,
7705 gfc_array_index_type,
7706 tmp, size2);
7707 size2 = fold_convert (size_type_node, size2);
7708 size2 = gfc_evaluate_now (size2, &fblock);
7709
7710 /* Realloc expression. Note that the scalarizer uses desc.data
7711 in the array reference - (*desc.data)[<element>]. */
7712 gfc_init_block (&realloc_block);
7713 tmp = build_call_expr_loc (input_location,
7714 builtin_decl_explicit (BUILT_IN_REALLOC), 2,
7715 fold_convert (pvoid_type_node, array1),
7716 size2);
7717 gfc_conv_descriptor_data_set (&realloc_block,
7718 desc, tmp);
7719 realloc_expr = gfc_finish_block (&realloc_block);
7720
7721 /* Only reallocate if sizes are different. */
7722 tmp = build3_v (COND_EXPR, neq_size, realloc_expr,
7723 build_empty_stmt (input_location));
7724 realloc_expr = tmp;
7725
7726
7727 /* Malloc expression. */
7728 gfc_init_block (&alloc_block);
7729 tmp = build_call_expr_loc (input_location,
7730 builtin_decl_explicit (BUILT_IN_MALLOC),
7731 1, size2);
7732 gfc_conv_descriptor_data_set (&alloc_block,
7733 desc, tmp);
7734 tmp = gfc_conv_descriptor_dtype (desc);
7735 gfc_add_modify (&alloc_block, tmp, gfc_get_dtype (TREE_TYPE (desc)));
7736 alloc_expr = gfc_finish_block (&alloc_block);
7737
7738 /* Malloc if not allocated; realloc otherwise. */
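/* In outline, the code assembled here behaves like

       if (data == NULL)
         data = malloc (size2);
       else if (size1 != size2)
         data = realloc (data, size2);

   where the malloc branch also sets the descriptor dtype and both branches
   store the result back into desc.data.  */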
7739 tmp = build_int_cst (TREE_TYPE (array1), 0);
7740 cond = fold_build2_loc (input_location, EQ_EXPR,
7741 boolean_type_node,
7742 array1, tmp);
7743 tmp = build3_v (COND_EXPR, cond, alloc_expr, realloc_expr);
7744 gfc_add_expr_to_block (&fblock, tmp);
7745
7746 /* Make sure that the scalarizer data pointer is updated. */
7747 if (linfo->data
7748 && TREE_CODE (linfo->data) == VAR_DECL)
7749 {
7750 tmp = gfc_conv_descriptor_data_get (desc);
7751 gfc_add_modify (&fblock, linfo->data, tmp);
7752 }
7753
7754 /* Add the exit label. */
7755 tmp = build1_v (LABEL_EXPR, jump_label2);
7756 gfc_add_expr_to_block (&fblock, tmp);
7757
7758 return gfc_finish_block (&fblock);
7759 }
7760
7761
7762 /* NULLIFY an allocatable/pointer array on function entry, free it on exit.
7763 Do likewise, recursively if necessary, with the allocatable components of
7764 derived types. */
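/* As an illustration (the names S, A, D and T are hypothetical):

       subroutine s
         real, allocatable :: a(:)   ! data pointer nullified on entry,
                                     ! storage freed on exit unless SAVEd
         type(t) :: d                ! if T has allocatable components they
                                     ! are nullified and later deallocated
       end subroutine s

   Pointer arrays only get their data pointer nullified; they are never
   freed here.  */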
7765
7766 void
7767 gfc_trans_deferred_array (gfc_symbol * sym, gfc_wrapped_block * block)
7768 {
7769 tree type;
7770 tree tmp;
7771 tree descriptor;
7772 stmtblock_t init;
7773 stmtblock_t cleanup;
7774 locus loc;
7775 int rank;
7776 bool sym_has_alloc_comp;
7777
7778 sym_has_alloc_comp = (sym->ts.type == BT_DERIVED
7779 || sym->ts.type == BT_CLASS)
7780 && sym->ts.u.derived->attr.alloc_comp;
7781
7782 /* Make sure the frontend gets these right. */
7783 if (!(sym->attr.pointer || sym->attr.allocatable || sym_has_alloc_comp))
7784 fatal_error ("Possible front-end bug: Deferred array size without pointer, "
7785 "allocatable attribute or derived type without allocatable "
7786 "components.");
7787
7788 gfc_save_backend_locus (&loc);
7789 gfc_set_backend_locus (&sym->declared_at);
7790 gfc_init_block (&init);
7791
7792 gcc_assert (TREE_CODE (sym->backend_decl) == VAR_DECL
7793 || TREE_CODE (sym->backend_decl) == PARM_DECL);
7794
7795 if (sym->ts.type == BT_CHARACTER
7796 && !INTEGER_CST_P (sym->ts.u.cl->backend_decl))
7797 {
7798 gfc_conv_string_length (sym->ts.u.cl, NULL, &init);
7799 gfc_trans_vla_type_sizes (sym, &init);
7800 }
7801
7802 /* Dummy, use-associated and result variables don't need anything special. */
7803 if (sym->attr.dummy || sym->attr.use_assoc || sym->attr.result)
7804 {
7805 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
7806 gfc_restore_backend_locus (&loc);
7807 return;
7808 }
7809
7810 descriptor = sym->backend_decl;
7811
7812 /* Although static, derived types with default initializers and
7813 allocatable components must not be nulled wholesale; instead they
7814 are treated component by component. */
7815 if (TREE_STATIC (descriptor) && !sym_has_alloc_comp)
7816 {
7817 /* SAVEd variables are not freed on exit. */
7818 gfc_trans_static_array_pointer (sym);
7819
7820 gfc_add_init_cleanup (block, gfc_finish_block (&init), NULL_TREE);
7821 gfc_restore_backend_locus (&loc);
7822 return;
7823 }
7824
7825 /* Get the descriptor type. */
7826 type = TREE_TYPE (sym->backend_decl);
7827
7828 if (sym_has_alloc_comp && !(sym->attr.pointer || sym->attr.allocatable))
7829 {
7830 if (!sym->attr.save
7831 && !(TREE_STATIC (sym->backend_decl) && sym->attr.is_main_program))
7832 {
7833 if (sym->value == NULL
7834 || !gfc_has_default_initializer (sym->ts.u.derived))
7835 {
7836 rank = sym->as ? sym->as->rank : 0;
7837 tmp = gfc_nullify_alloc_comp (sym->ts.u.derived,
7838 descriptor, rank);
7839 gfc_add_expr_to_block (&init, tmp);
7840 }
7841 else
7842 gfc_init_default_dt (sym, &init, false);
7843 }
7844 }
7845 else if (!GFC_DESCRIPTOR_TYPE_P (type))
7846 {
7847 /* If the backend_decl is not a descriptor, we must have a pointer
7848 to one. */
7849 descriptor = build_fold_indirect_ref_loc (input_location,
7850 sym->backend_decl);
7851 type = TREE_TYPE (descriptor);
7852 }
7853
7854 /* NULLIFY the data pointer. */
7855 if (GFC_DESCRIPTOR_TYPE_P (type) && !sym->attr.save)
7856 gfc_conv_descriptor_data_set (&init, descriptor, null_pointer_node);
7857
7858 gfc_restore_backend_locus (&loc);
7859 gfc_init_block (&cleanup);
7860
7861 /* Allocatable arrays need to be freed when they go out of scope.
7862 The allocatable components of pointers must not be touched. */
7863 if (sym_has_alloc_comp && !(sym->attr.function || sym->attr.result)
7864 && !sym->attr.pointer && !sym->attr.save)
7865 {
7866 int rank;
7867 rank = sym->as ? sym->as->rank : 0;
7868 tmp = gfc_deallocate_alloc_comp (sym->ts.u.derived, descriptor, rank);
7869 gfc_add_expr_to_block (&cleanup, tmp);
7870 }
7871
7872 if (sym->attr.allocatable && (sym->attr.dimension || sym->attr.codimension)
7873 && !sym->attr.save && !sym->attr.result)
7874 {
7875 tmp = gfc_trans_dealloc_allocated (sym->backend_decl);
7876 gfc_add_expr_to_block (&cleanup, tmp);
7877 }
7878
7879 gfc_add_init_cleanup (block, gfc_finish_block (&init),
7880 gfc_finish_block (&cleanup));
7881 }
7882
7883 /************ Expression Walking Functions ******************/
7884
7885 /* Walk a variable reference.
7886
7887 Possible extension - multiple component subscripts.
7888 x(:,:) = foo%a(:)%b(:)
7889 Transforms to
7890 forall (i=..., j=...)
7891 x(i,j) = foo%a(j)%b(i)
7892 end forall
7893 This adds a fair amount of complexity because you need to deal with more
7894 than one ref. Maybe handle in a similar manner to vector subscripts.
7895 Maybe not worth the effort. */
7896
7897
7898 static gfc_ss *
7899 gfc_walk_variable_expr (gfc_ss * ss, gfc_expr * expr)
7900 {
7901 gfc_ref *ref;
7902
7903 for (ref = expr->ref; ref; ref = ref->next)
7904 if (ref->type == REF_ARRAY && ref->u.ar.type != AR_ELEMENT)
7905 break;
7906
7907 return gfc_walk_array_ref (ss, expr, ref);
7908 }
7909
7910
7911 gfc_ss *
7912 gfc_walk_array_ref (gfc_ss * ss, gfc_expr * expr, gfc_ref * ref)
7913 {
7914 gfc_array_ref *ar;
7915 gfc_ss *newss;
7916 int n;
7917
7918 for (; ref; ref = ref->next)
7919 {
7920 if (ref->type == REF_SUBSTRING)
7921 {
7922 ss = gfc_get_scalar_ss (ss, ref->u.ss.start);
7923 ss = gfc_get_scalar_ss (ss, ref->u.ss.end);
7924 }
7925
7926 /* We're only interested in array sections from now on. */
7927 if (ref->type != REF_ARRAY)
7928 continue;
7929
7930 ar = &ref->u.ar;
7931
7932 switch (ar->type)
7933 {
7934 case AR_ELEMENT:
7935 for (n = ar->dimen - 1; n >= 0; n--)
7936 ss = gfc_get_scalar_ss (ss, ar->start[n]);
7937 break;
7938
7939 case AR_FULL:
7940 newss = gfc_get_array_ss (ss, expr, ar->as->rank, GFC_SS_SECTION);
7941 newss->info->data.array.ref = ref;
7942
7943 /* Make sure a full array is treated the same as array(:,:), so that
7944 we don't need to special-case it all the time. */
7945 ar->dimen = ar->as->rank;
7946 for (n = 0; n < ar->dimen; n++)
7947 {
7948 ar->dimen_type[n] = DIMEN_RANGE;
7949
7950 gcc_assert (ar->start[n] == NULL);
7951 gcc_assert (ar->end[n] == NULL);
7952 gcc_assert (ar->stride[n] == NULL);
7953 }
7954 ss = newss;
7955 break;
7956
7957 case AR_SECTION:
7958 newss = gfc_get_array_ss (ss, expr, 0, GFC_SS_SECTION);
7959 newss->info->data.array.ref = ref;
7960
7961 /* We add SS chains for all the subscripts in the section. */
7962 for (n = 0; n < ar->dimen; n++)
7963 {
7964 gfc_ss *indexss;
7965
7966 switch (ar->dimen_type[n])
7967 {
7968 case DIMEN_ELEMENT:
7969 /* Add SS for elemental (scalar) subscripts. */
7970 gcc_assert (ar->start[n]);
7971 indexss = gfc_get_scalar_ss (gfc_ss_terminator, ar->start[n]);
7972 indexss->loop_chain = gfc_ss_terminator;
7973 newss->info->data.array.subscript[n] = indexss;
7974 break;
7975
7976 case DIMEN_RANGE:
7977 /* We don't add anything for sections; we just remember this
7978 dimension for later. */
7979 newss->dim[newss->dimen] = n;
7980 newss->dimen++;
7981 break;
7982
7983 case DIMEN_VECTOR:
7984 /* Create a GFC_SS_VECTOR index in which we can store
7985 the vector's descriptor. */
7986 indexss = gfc_get_array_ss (gfc_ss_terminator, ar->start[n],
7987 1, GFC_SS_VECTOR);
7988 indexss->loop_chain = gfc_ss_terminator;
7989 newss->info->data.array.subscript[n] = indexss;
7990 newss->dim[newss->dimen] = n;
7991 newss->dimen++;
7992 break;
7993
7994 default:
7995 /* We should know what sort of section it is by now. */
7996 gcc_unreachable ();
7997 }
7998 }
7999 /* We should have at least one non-elemental dimension,
8000 unless we are creating a descriptor for a (scalar) coarray. */
8001 gcc_assert (newss->dimen > 0
8002 || newss->info->data.array.ref->u.ar.as->corank > 0);
8003 ss = newss;
8004 break;
8005
8006 default:
8007 /* We should know what sort of array reference it is by now. */
8008 gcc_unreachable ();
8009 }
8010
8011 }
8012 return ss;
8013 }
8014
8015
8016 /* Walk an expression operator. If only one operand of a binary expression is
8017 scalar, we must also add the scalar term to the SS chain. */
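/* For example, in  c = a + 1.0  with A and C arrays, only A produces an SS
   when the operands are walked; a GFC_SS_SCALAR entry for the constant is
   added below so that the scalarizer can substitute its precalculated
   value inside the loop.  In  c = a + b  both operands already have SS
   entries and nothing extra is needed.  */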
8018
8019 static gfc_ss *
8020 gfc_walk_op_expr (gfc_ss * ss, gfc_expr * expr)
8021 {
8022 gfc_ss *head;
8023 gfc_ss *head2;
8024
8025 head = gfc_walk_subexpr (ss, expr->value.op.op1);
8026 if (expr->value.op.op2 == NULL)
8027 head2 = head;
8028 else
8029 head2 = gfc_walk_subexpr (head, expr->value.op.op2);
8030
8031 /* All operands are scalar. Pass back and let the caller deal with it. */
8032 if (head2 == ss)
8033 return head2;
8034
8035 /* All operands require scalarization. */
8036 if (head != ss && (expr->value.op.op2 == NULL || head2 != head))
8037 return head2;
8038
8039 /* One of the operands needs scalarization, the other is scalar.
8040 Create a gfc_ss for the scalar expression. */
8041 if (head == ss)
8042 {
8043 /* First operand is scalar. We build the chain in reverse order, so
8044 add the scalar SS after the second operand. */
8045 head = head2;
8046 while (head && head->next != ss)
8047 head = head->next;
8048 /* Check we haven't somehow broken the chain. */
8049 gcc_assert (head);
8050 head->next = gfc_get_scalar_ss (ss, expr->value.op.op1);
8051 }
8052 else /* head2 == head */
8053 {
8054 gcc_assert (head2 == head);
8055 /* Second operand is scalar. */
8056 head2 = gfc_get_scalar_ss (head2, expr->value.op.op2);
8057 }
8058
8059 return head2;
8060 }
8061
8062
8063 /* Reverse a SS chain. */
8064
8065 gfc_ss *
8066 gfc_reverse_ss (gfc_ss * ss)
8067 {
8068 gfc_ss *next;
8069 gfc_ss *head;
8070
8071 gcc_assert (ss != NULL);
8072
8073 head = gfc_ss_terminator;
8074 while (ss != gfc_ss_terminator)
8075 {
8076 next = ss->next;
8077 /* Check we didn't somehow break the chain. */
8078 gcc_assert (next != NULL);
8079 ss->next = head;
8080 head = ss;
8081 ss = next;
8082 }
8083
8084 return head;
8085 }
8086
8087
8088 /* Walk the arguments of an elemental function. */
8089
8090 gfc_ss *
8091 gfc_walk_elemental_function_args (gfc_ss * ss, gfc_actual_arglist *arg,
8092 gfc_ss_type type)
8093 {
8094 int scalar;
8095 gfc_ss *head;
8096 gfc_ss *tail;
8097 gfc_ss *newss;
8098
8099 head = gfc_ss_terminator;
8100 tail = NULL;
8101 scalar = 1;
8102 for (; arg; arg = arg->next)
8103 {
8104 if (!arg->expr)
8105 continue;
8106
8107 newss = gfc_walk_subexpr (head, arg->expr);
8108 if (newss == head)
8109 {
8110 /* Scalar argument. */
8111 gcc_assert (type == GFC_SS_SCALAR || type == GFC_SS_REFERENCE);
8112 newss = gfc_get_scalar_ss (head, arg->expr);
8113 newss->info->type = type;
8114 }
8115 else
8116 scalar = 0;
8117
8118 head = newss;
8119 if (!tail)
8120 {
8121 tail = head;
8122 while (tail->next != gfc_ss_terminator)
8123 tail = tail->next;
8124 }
8125 }
8126
8127 if (scalar)
8128 {
8129 /* If all the arguments are scalar, we don't need the argument SS chain. */
8130 gfc_free_ss_chain (head);
8131 /* Pass it back. */
8132 return ss;
8133 }
8134
8135 /* Add it onto the existing chain. */
8136 tail->next = ss;
8137 return head;
8138 }
8139
8140
8141 /* Walk a function call. Scalar functions are passed back and evaluated outside
8142 the scalarization loops. For elemental functions we walk their arguments.
8143 The result of functions returning arrays is stored in a temporary outside
8144 the loop, so that the function is only called once. Hence we do not need
8145 to walk their arguments. */
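/* Illustrative cases, with F scalar, G elemental and H array-valued (all
   hypothetical user procedures):

       y = f (x)    ! scalar: pass back; evaluated outside the loop
       z = g (a)    ! elemental: walk the arguments; called per element
       z = h (a)    ! array result: GFC_SS_FUNCTION; the result goes into a
                    ! temporary, so H is called only once  */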
8146
8147 static gfc_ss *
8148 gfc_walk_function_expr (gfc_ss * ss, gfc_expr * expr)
8149 {
8150 gfc_intrinsic_sym *isym;
8151 gfc_symbol *sym;
8152 gfc_component *comp = NULL;
8153
8154 isym = expr->value.function.isym;
8155
8156 /* Handle intrinsic functions separately. */
8157 if (isym)
8158 return gfc_walk_intrinsic_function (ss, expr, isym);
8159
8160 sym = expr->value.function.esym;
8161 if (!sym)
8162 sym = expr->symtree->n.sym;
8163
8164 /* A function that returns arrays. */
8165 gfc_is_proc_ptr_comp (expr, &comp);
8166 if ((!comp && gfc_return_by_reference (sym) && sym->result->attr.dimension)
8167 || (comp && comp->attr.dimension))
8168 return gfc_get_array_ss (ss, expr, expr->rank, GFC_SS_FUNCTION);
8169
8170 /* Walk the parameters of an elemental function. For now we always pass
8171 by reference. */
8172 if (sym->attr.elemental)
8173 return gfc_walk_elemental_function_args (ss, expr->value.function.actual,
8174 GFC_SS_REFERENCE);
8175
8176 /* Scalar functions are OK as these are evaluated outside the scalarization
8177 loop. Pass back and let the caller deal with it. */
8178 return ss;
8179 }
8180
8181
8182 /* An array temporary is constructed for array constructors. */
8183
8184 static gfc_ss *
8185 gfc_walk_array_constructor (gfc_ss * ss, gfc_expr * expr)
8186 {
8187 return gfc_get_array_ss (ss, expr, expr->rank, GFC_SS_CONSTRUCTOR);
8188 }
8189
8190
8191 /* Walk an expression. Add walked expressions to the head of the SS chain.
8192 A wholly scalar expression will not be added. */
8193
8194 gfc_ss *
8195 gfc_walk_subexpr (gfc_ss * ss, gfc_expr * expr)
8196 {
8197 gfc_ss *head;
8198
8199 switch (expr->expr_type)
8200 {
8201 case EXPR_VARIABLE:
8202 head = gfc_walk_variable_expr (ss, expr);
8203 return head;
8204
8205 case EXPR_OP:
8206 head = gfc_walk_op_expr (ss, expr);
8207 return head;
8208
8209 case EXPR_FUNCTION:
8210 head = gfc_walk_function_expr (ss, expr);
8211 return head;
8212
8213 case EXPR_CONSTANT:
8214 case EXPR_NULL:
8215 case EXPR_STRUCTURE:
8216 /* Pass back and let the caller deal with it. */
8217 break;
8218
8219 case EXPR_ARRAY:
8220 head = gfc_walk_array_constructor (ss, expr);
8221 return head;
8222
8223 case EXPR_SUBSTRING:
8224 /* Pass back and let the caller deal with it. */
8225 break;
8226
8227 default:
8228 internal_error ("bad expression type during walk (%d)",
8229 expr->expr_type);
8230 }
8231 return ss;
8232 }
8233
8234
8235 /* Entry point for expression walking.
8236 A return value equal to the passed chain means this is
8237 a scalar expression. It is up to the caller to take whatever action is
8238 necessary to translate these. */
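/* A sketch of the common calling pattern (details vary between callers):

       gfc_ss *ss = gfc_walk_expr (expr);
       if (ss == gfc_ss_terminator)
         translate the scalar expression directly with gfc_conv_expr;
       else
         {
           gfc_init_loopinfo (&loop);
           gfc_add_ss_to_loop (&loop, ss);
           gfc_conv_ss_startstride (&loop);
           gfc_conv_loop_setup (&loop, &expr->where);
           ... scalarized body ...
         }  */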
8239
8240 gfc_ss *
8241 gfc_walk_expr (gfc_expr * expr)
8242 {
8243 gfc_ss *res;
8244
8245 res = gfc_walk_subexpr (gfc_ss_terminator, expr);
8246 return gfc_reverse_ss (res);
8247 }