]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/stor-layout.c
hwint.c: New.
[thirdparty/gcc.git] / gcc / stor-layout.c
1 /* C-compiler utilities for types and variables storage layout
2 Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1996, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "tree.h"
28 #include "rtl.h"
29 #include "tm_p.h"
30 #include "flags.h"
31 #include "function.h"
32 #include "expr.h"
33 #include "output.h"
34 #include "diagnostic-core.h"
35 #include "ggc.h"
36 #include "target.h"
37 #include "langhooks.h"
38 #include "regs.h"
39 #include "params.h"
40 #include "cgraph.h"
41 #include "tree-inline.h"
42 #include "tree-dump.h"
43 #include "gimple.h"
44
/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  Indexed by enum size_type_kind
   (SIZETYPE, SSIZETYPE, BITSIZETYPE, ...).  */
tree sizetype_tab[(int) TYPE_KIND_LAST];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  Initialized from the target's default
   struct-packing setting; #pragma pack and -fpack-struct update it.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;

/* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated
   in the address spaces' address_mode, not pointer_mode.  Set only by
   internal_reference_types called only by a front end.  */
static int reference_types_internal = 0;

/* Forward declarations for the local layout helpers defined below.  */
static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
#if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
			     HOST_WIDE_INT, tree);
#endif
extern void debug_rli (record_layout_info);

/* SAVE_EXPRs for sizes of types and decls, waiting to be expanded.  */

static GTY(()) VEC(tree,gc) *pending_sizes;
71
/* Show that REFERENCE_TYPES are internal and should use address_mode.
   Called only by a front end.  */

void
internal_reference_types (void)
{
  /* The flag is consulted when laying out REFERENCE_TYPE nodes.  */
  reference_types_internal = 1;
}
80
81 /* Get a VEC of all the objects put on the pending sizes list. */
82
83 VEC(tree,gc) *
84 get_pending_sizes (void)
85 {
86 VEC(tree,gc) *chain = pending_sizes;
87
88 pending_sizes = 0;
89 return chain;
90 }
91
92 /* Add EXPR to the pending sizes list. */
93
94 void
95 put_pending_size (tree expr)
96 {
97 /* Strip any simple arithmetic from EXPR to see if it has an underlying
98 SAVE_EXPR. */
99 expr = skip_simple_arithmetic (expr);
100
101 if (TREE_CODE (expr) == SAVE_EXPR)
102 VEC_safe_push (tree, gc, pending_sizes, expr);
103 }
104
/* Put a chain of objects into the pending sizes list, which must be
   empty.  */

void
put_pending_sizes (VEC(tree,gc) *chain)
{
  /* The caller is expected to have drained the list first, typically
     via get_pending_sizes.  */
  gcc_assert (!pending_sizes);
  pending_sizes = chain;
}
114
/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
   to serve as the actual size-expression for a type or decl.  The
   resulting expression is either queued on the pending-sizes list or
   handed back for the front end to manage, depending on language
   hooks and the current function's settings.  */

tree
variable_size (tree size)
{
  tree save;

  /* Obviously.  */
  if (TREE_CONSTANT (size))
    return size;

  /* If the size is self-referential, we can't make a SAVE_EXPR (see
     save_expr for the rationale).  But we can do something else.  */
  if (CONTAINS_PLACEHOLDER_P (size))
    return self_referential_size (size);

  /* If the language-processor is to take responsibility for variable-sized
     items (e.g., languages which have elaboration procedures like Ada),
     just return SIZE unchanged.  */
  if (lang_hooks.decls.global_bindings_p () < 0)
    return size;

  size = save_expr (size);

  /* If an array with a variable number of elements is declared, and
     the elements require destruction, we will emit a cleanup for the
     array.  That cleanup is run both on normal exit from the block
     and in the exception-handler for the block.  Normally, when code
     is used in both ordinary code and in an exception handler it is
     `unsaved', i.e., all SAVE_EXPRs are recalculated.  However, we do
     not wish to do that here; the array-size is the same in both
     places.  */
  save = skip_simple_arithmetic (size);

  if (cfun && cfun->dont_save_pending_sizes_p)
    /* The front-end doesn't want us to keep a list of the expressions
       that determine sizes for variable size objects.  Trust it.  */
    return size;

  if (lang_hooks.decls.global_bindings_p ())
    {
      /* A variable-sized object at file scope is an error; report it
	 and return a harmless size so layout can continue.  */
      if (TREE_CONSTANT (size))
	error ("type size can%'t be explicitly evaluated");
      else
	error ("variable-size type declared outside of any function");

      return size_one_node;
    }

  put_pending_size (save);

  return size;
}
169
/* An array of functions used for self-referential size computation.
   Filled by self_referential_size; compiled at the very end of the
   translation unit by finalize_size_functions.  */
static GTY(()) VEC (tree, gc) *size_functions;
172
173 /* Look inside EXPR into simple arithmetic operations involving constants.
174 Return the outermost non-arithmetic or non-constant node. */
175
176 static tree
177 skip_simple_constant_arithmetic (tree expr)
178 {
179 while (true)
180 {
181 if (UNARY_CLASS_P (expr))
182 expr = TREE_OPERAND (expr, 0);
183 else if (BINARY_CLASS_P (expr))
184 {
185 if (TREE_CONSTANT (TREE_OPERAND (expr, 1)))
186 expr = TREE_OPERAND (expr, 0);
187 else if (TREE_CONSTANT (TREE_OPERAND (expr, 0)))
188 expr = TREE_OPERAND (expr, 1);
189 else
190 break;
191 }
192 else
193 break;
194 }
195
196 return expr;
197 }
198
/* Similar to copy_tree_r but do not copy component references involving
   PLACEHOLDER_EXPRs.  These nodes are spotted in find_placeholder_in_expr
   and substituted in substitute_in_expr.

   This is a walk_tree callback: *TP is the current node, *WALK_SUBTREES
   is cleared to suppress recursion, and a non-NULL return aborts the walk
   (error_mark_node is used below to signal an unsupported SAVE_EXPR).  */

static tree
copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);

  /* Stop at types, decls, constants like copy_tree_r.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* This is the pattern built in ada/make_aligning_type.  */
  else if (code == ADDR_EXPR
	   && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Default case: the component reference.  */
  else if (code == COMPONENT_REF)
    {
      tree inner;
      /* Walk down the chain of references to find the innermost base.  */
      for (inner = TREE_OPERAND (*tp, 0);
	   REFERENCE_CLASS_P (inner);
	   inner = TREE_OPERAND (inner, 0))
	;

      /* A reference rooted at a PLACEHOLDER_EXPR is kept shared, not
	 copied, so substitute_in_expr can later spot it.  */
      if (TREE_CODE (inner) == PLACEHOLDER_EXPR)
	{
	  *walk_subtrees = 0;
	  return NULL_TREE;
	}
    }

  /* We're not supposed to have them in self-referential size trees
     because we wouldn't properly control when they are evaluated.
     However, not creating superfluous SAVE_EXPRs requires accurate
     tracking of readonly-ness all the way down to here, which we
     cannot always guarantee in practice.  So punt in this case.  */
  else if (code == SAVE_EXPR)
    return error_mark_node;

  return copy_tree_r (tp, walk_subtrees, data);
}
251
252 /* Given a SIZE expression that is self-referential, return an equivalent
253 expression to serve as the actual size expression for a type. */
254
255 static tree
256 self_referential_size (tree size)
257 {
258 static unsigned HOST_WIDE_INT fnno = 0;
259 VEC (tree, heap) *self_refs = NULL;
260 tree param_type_list = NULL, param_decl_list = NULL;
261 tree t, ref, return_type, fntype, fnname, fndecl;
262 unsigned int i;
263 char buf[128];
264 VEC(tree,gc) *args = NULL;
265
266 /* Do not factor out simple operations. */
267 t = skip_simple_constant_arithmetic (size);
268 if (TREE_CODE (t) == CALL_EXPR)
269 return size;
270
271 /* Collect the list of self-references in the expression. */
272 find_placeholder_in_expr (size, &self_refs);
273 gcc_assert (VEC_length (tree, self_refs) > 0);
274
275 /* Obtain a private copy of the expression. */
276 t = size;
277 if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
278 return size;
279 size = t;
280
281 /* Build the parameter and argument lists in parallel; also
282 substitute the former for the latter in the expression. */
283 args = VEC_alloc (tree, gc, VEC_length (tree, self_refs));
284 FOR_EACH_VEC_ELT (tree, self_refs, i, ref)
285 {
286 tree subst, param_name, param_type, param_decl;
287
288 if (DECL_P (ref))
289 {
290 /* We shouldn't have true variables here. */
291 gcc_assert (TREE_READONLY (ref));
292 subst = ref;
293 }
294 /* This is the pattern built in ada/make_aligning_type. */
295 else if (TREE_CODE (ref) == ADDR_EXPR)
296 subst = ref;
297 /* Default case: the component reference. */
298 else
299 subst = TREE_OPERAND (ref, 1);
300
301 sprintf (buf, "p%d", i);
302 param_name = get_identifier (buf);
303 param_type = TREE_TYPE (ref);
304 param_decl
305 = build_decl (input_location, PARM_DECL, param_name, param_type);
306 if (targetm.calls.promote_prototypes (NULL_TREE)
307 && INTEGRAL_TYPE_P (param_type)
308 && TYPE_PRECISION (param_type) < TYPE_PRECISION (integer_type_node))
309 DECL_ARG_TYPE (param_decl) = integer_type_node;
310 else
311 DECL_ARG_TYPE (param_decl) = param_type;
312 DECL_ARTIFICIAL (param_decl) = 1;
313 TREE_READONLY (param_decl) = 1;
314
315 size = substitute_in_expr (size, subst, param_decl);
316
317 param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
318 param_decl_list = chainon (param_decl, param_decl_list);
319 VEC_quick_push (tree, args, ref);
320 }
321
322 VEC_free (tree, heap, self_refs);
323
324 /* Append 'void' to indicate that the number of parameters is fixed. */
325 param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);
326
327 /* The 3 lists have been created in reverse order. */
328 param_type_list = nreverse (param_type_list);
329 param_decl_list = nreverse (param_decl_list);
330
331 /* Build the function type. */
332 return_type = TREE_TYPE (size);
333 fntype = build_function_type (return_type, param_type_list);
334
335 /* Build the function declaration. */
336 sprintf (buf, "SZ"HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
337 fnname = get_file_function_name (buf);
338 fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
339 for (t = param_decl_list; t; t = DECL_CHAIN (t))
340 DECL_CONTEXT (t) = fndecl;
341 DECL_ARGUMENTS (fndecl) = param_decl_list;
342 DECL_RESULT (fndecl)
343 = build_decl (input_location, RESULT_DECL, 0, return_type);
344 DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;
345
346 /* The function has been created by the compiler and we don't
347 want to emit debug info for it. */
348 DECL_ARTIFICIAL (fndecl) = 1;
349 DECL_IGNORED_P (fndecl) = 1;
350
351 /* It is supposed to be "const" and never throw. */
352 TREE_READONLY (fndecl) = 1;
353 TREE_NOTHROW (fndecl) = 1;
354
355 /* We want it to be inlined when this is deemed profitable, as
356 well as discarded if every call has been integrated. */
357 DECL_DECLARED_INLINE_P (fndecl) = 1;
358
359 /* It is made up of a unique return statement. */
360 DECL_INITIAL (fndecl) = make_node (BLOCK);
361 BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
362 t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
363 DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
364 TREE_STATIC (fndecl) = 1;
365
366 /* Put it onto the list of size functions. */
367 VEC_safe_push (tree, gc, size_functions, fndecl);
368
369 /* Replace the original expression with a call to the size function. */
370 return build_call_expr_loc_vec (input_location, fndecl, args);
371 }
372
373 /* Take, queue and compile all the size functions. It is essential that
374 the size functions be gimplified at the very end of the compilation
375 in order to guarantee transparent handling of self-referential sizes.
376 Otherwise the GENERIC inliner would not be able to inline them back
377 at each of their call sites, thus creating artificial non-constant
378 size expressions which would trigger nasty problems later on. */
379
380 void
381 finalize_size_functions (void)
382 {
383 unsigned int i;
384 tree fndecl;
385
386 for (i = 0; VEC_iterate(tree, size_functions, i, fndecl); i++)
387 {
388 dump_function (TDI_original, fndecl);
389 gimplify_function_tree (fndecl);
390 dump_function (TDI_generic, fndecl);
391 cgraph_finalize_function (fndecl, false);
392 }
393
394 VEC_free (tree, gc, size_functions);
395 }
396 \f
397 /* Return the machine mode to use for a nonscalar of SIZE bits. The
398 mode must be in class MCLASS, and have exactly that many value bits;
399 it may have padding as well. If LIMIT is nonzero, modes of wider
400 than MAX_FIXED_MODE_SIZE will not be used. */
401
402 enum machine_mode
403 mode_for_size (unsigned int size, enum mode_class mclass, int limit)
404 {
405 enum machine_mode mode;
406
407 if (limit && size > MAX_FIXED_MODE_SIZE)
408 return BLKmode;
409
410 /* Get the first mode which has this size, in the specified class. */
411 for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
412 mode = GET_MODE_WIDER_MODE (mode))
413 if (GET_MODE_PRECISION (mode) == size)
414 return mode;
415
416 return BLKmode;
417 }
418
419 /* Similar, except passed a tree node. */
420
421 enum machine_mode
422 mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
423 {
424 unsigned HOST_WIDE_INT uhwi;
425 unsigned int ui;
426
427 if (!host_integerp (size, 1))
428 return BLKmode;
429 uhwi = tree_low_cst (size, 1);
430 ui = uhwi;
431 if (uhwi != ui)
432 return BLKmode;
433 return mode_for_size (ui, mclass, limit);
434 }
435
436 /* Similar, but never return BLKmode; return the narrowest mode that
437 contains at least the requested number of value bits. */
438
439 enum machine_mode
440 smallest_mode_for_size (unsigned int size, enum mode_class mclass)
441 {
442 enum machine_mode mode;
443
444 /* Get the first mode which has at least this size, in the
445 specified class. */
446 for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
447 mode = GET_MODE_WIDER_MODE (mode))
448 if (GET_MODE_PRECISION (mode) >= size)
449 return mode;
450
451 gcc_unreachable ();
452 }
453
/* Find an integer mode of the exact same size, or BLKmode on failure.  */

enum machine_mode
int_mode_for_mode (enum machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    /* Already an integer mode: return it unchanged.  */
    case MODE_INT:
    case MODE_PARTIAL_INT:
      break;

    /* Any other value-carrying class: look up an integer mode with the
       same total bit size.  mode_for_size yields BLKmode on failure.  */
    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
    case MODE_FRACT:
    case MODE_ACCUM:
    case MODE_UFRACT:
    case MODE_UACCUM:
    case MODE_VECTOR_FRACT:
    case MODE_VECTOR_ACCUM:
    case MODE_VECTOR_UFRACT:
    case MODE_VECTOR_UACCUM:
      mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
      break;

    case MODE_RANDOM:
      /* BLKmode maps to itself; any other MODE_RANDOM mode (e.g.
	 VOIDmode) falls through to the unreachable below.  */
      if (mode == BLKmode)
	break;

      /* ... fall through ...  */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }

  return mode;
}
495
496 /* Find a mode that is suitable for representing a vector with
497 NUNITS elements of mode INNERMODE. Returns BLKmode if there
498 is no suitable mode. */
499
500 enum machine_mode
501 mode_for_vector (enum machine_mode innermode, unsigned nunits)
502 {
503 enum machine_mode mode;
504
505 /* First, look for a supported vector type. */
506 if (SCALAR_FLOAT_MODE_P (innermode))
507 mode = MIN_MODE_VECTOR_FLOAT;
508 else if (SCALAR_FRACT_MODE_P (innermode))
509 mode = MIN_MODE_VECTOR_FRACT;
510 else if (SCALAR_UFRACT_MODE_P (innermode))
511 mode = MIN_MODE_VECTOR_UFRACT;
512 else if (SCALAR_ACCUM_MODE_P (innermode))
513 mode = MIN_MODE_VECTOR_ACCUM;
514 else if (SCALAR_UACCUM_MODE_P (innermode))
515 mode = MIN_MODE_VECTOR_UACCUM;
516 else
517 mode = MIN_MODE_VECTOR_INT;
518
519 /* Do not check vector_mode_supported_p here. We'll do that
520 later in vector_type_mode. */
521 for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
522 if (GET_MODE_NUNITS (mode) == nunits
523 && GET_MODE_INNER (mode) == innermode)
524 break;
525
526 /* For integers, try mapping it to a same-sized scalar mode. */
527 if (mode == VOIDmode
528 && GET_MODE_CLASS (innermode) == MODE_INT)
529 mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
530 MODE_INT, 0);
531
532 if (mode == VOIDmode
533 || (GET_MODE_CLASS (mode) == MODE_INT
534 && !have_regs_of_mode[mode]))
535 return BLKmode;
536
537 return mode;
538 }
539
/* Return the alignment of MODE.  This will be bounded by 1 and
   BIGGEST_ALIGNMENT.  The base value comes from the target's
   mode_base_align table, measured in units and converted to bits.  */

unsigned int
get_mode_alignment (enum machine_mode mode)
{
  return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
}
548
549 \f
550 /* Subroutine of layout_decl: Force alignment required for the data type.
551 But if the decl itself wants greater alignment, don't override that. */
552
553 static inline void
554 do_type_align (tree type, tree decl)
555 {
556 if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
557 {
558 DECL_ALIGN (decl) = TYPE_ALIGN (type);
559 if (TREE_CODE (decl) == FIELD_DECL)
560 DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
561 }
562 }
563
/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;
  location_t loc = DECL_SOURCE_LOCATION (decl);

  if (code == CONST_DECL)
    return;

  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
	      || code == TYPE_DECL ||code == FIELD_DECL);

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change,
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    DECL_MODE (decl) = TYPE_MODE (type);

  if (DECL_SIZE (decl) == 0)
    {
      /* No explicit size: inherit both forms from the type.  */
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    /* Explicit bit size but no byte size: derive the latter, rounding
       up to a whole number of bytes.  */
    DECL_SIZE_UNIT (decl)
      = fold_convert_loc (loc, sizetype,
			  size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
					  bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      bool packed_p = DECL_PACKED (decl);
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
	{
	  DECL_BIT_FIELD_TYPE (decl) = type;

	  /* A zero-length bit-field affects the alignment of the next
	     field.  In essence such bit-fields are not influenced by
	     any packing due to #pragma pack or attribute packed.  */
	  if (integer_zerop (DECL_SIZE (decl))
	      && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
	    {
	      zero_bitfield = true;
	      packed_p = false;
#ifdef PCC_BITFIELD_TYPE_MATTERS
	      if (PCC_BITFIELD_TYPE_MATTERS)
		do_type_align (type, decl);
	      else
#endif
		{
#ifdef EMPTY_FIELD_BOUNDARY
		  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
		    {
		      DECL_ALIGN (decl) = EMPTY_FIELD_BOUNDARY;
		      DECL_USER_ALIGN (decl) = 0;
		    }
#endif
		}
	    }

	  /* See if we can use an ordinary integer mode for a bit-field.
	     Conditions are: a fixed size that is correct for another mode,
	     occupying a complete byte or bytes on proper boundary,
	     and not volatile or not -fstrict-volatile-bitfields.  */
	  if (TYPE_SIZE (type) != 0
	      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
	      && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
	      && !(TREE_THIS_VOLATILE (decl)
		   && flag_strict_volatile_bitfields > 0))
	    {
	      enum machine_mode xmode
		= mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
	      unsigned int xalign = GET_MODE_ALIGNMENT (xmode);

	      if (xmode != BLKmode
		  && !(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
		  && (known_align == 0 || known_align >= xalign))
		{
		  DECL_ALIGN (decl) = MAX (xalign, DECL_ALIGN (decl));
		  DECL_MODE (decl) = xmode;
		  DECL_BIT_FIELD (decl) = 0;
		}
	    }

	  /* Turn off DECL_BIT_FIELD if we won't need it set.  */
	  if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
	      && known_align >= TYPE_ALIGN (type)
	      && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
	    DECL_BIT_FIELD (decl) = 0;
	}
      else if (packed_p && DECL_USER_ALIGN (decl))
	/* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
	   round up; we'll reduce it again below.  We want packing to
	   supersede USER_ALIGN inherited from the type, but defer to
	   alignment explicitly specified on the field decl.  */;
      else
	do_type_align (type, decl);

      /* If the field is packed and not explicitly aligned, give it the
	 minimum alignment.  Note that do_type_align may set
	 DECL_USER_ALIGN, so we need to check old_user_align instead.  */
      if (packed_p
	  && !old_user_align)
	DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);

      if (! packed_p && ! DECL_USER_ALIGN (decl))
	{
	  /* Some targets (i.e. i386, VMS) limit struct field alignment
	     to a lower boundary than alignment of variables unless
	     it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
	  DECL_ALIGN (decl)
	    = MIN (DECL_ALIGN (decl), (unsigned) BIGGEST_FIELD_ALIGNMENT);
#endif
#ifdef ADJUST_FIELD_ALIGN
	  DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl));
#endif
	}

      /* Apply the maximum field alignment cap.  Zero-length bit-fields
	 ignore #pragma pack, so they use the initial maximum instead.  */
      if (zero_bitfield)
	mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
	mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
	DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), mfa);
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if (warn_larger_than
      && (code == VAR_DECL || code == PARM_DECL)
      && ! DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST
	  && compare_tree_int (size, larger_than_size) > 0)
	{
	  int size_as_int = TREE_INT_CST_LOW (size);

	  /* Print the exact size if it fits in an int, otherwise just
	     report that it exceeds the threshold.  */
	  if (compare_tree_int (size, size_as_int) == 0)
	    warning (OPT_Wlarger_than_, "size of %q+D is %d bytes", decl, size_as_int);
	  else
	    warning (OPT_Wlarger_than_, "size of %q+D is larger than %wd bytes",
		     decl, larger_than_size);
	}
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      /* Clear the RTL before set_mem_attributes so it recomputes the
	 attributes from the (possibly updated) decl.  */
      SET_DECL_RTL (decl, 0);
      set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}
764
/* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
   a previous call to layout_decl and calls it again.  */

void
relayout_decl (tree decl)
{
  /* Reset everything layout_decl computes so it starts from scratch.  */
  DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
  DECL_MODE (decl) = VOIDmode;
  /* Preserve an alignment the user asked for explicitly.  */
  if (!DECL_USER_ALIGN (decl))
    DECL_ALIGN (decl) = 0;
  SET_DECL_RTL (decl, 0);

  layout_decl (decl, 0);
}
779 \f
/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = XNEW (struct record_layout_info_s);

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have minimum size.  */
  if (! TYPE_PACKED (t))
    {
      unsigned tmp;

      /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY.  */
      tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
      if (maximum_field_alignment != 0)
	tmp = MIN (tmp, maximum_field_alignment);
      rli->record_align = MAX (rli->record_align, tmp);
    }
#endif

  /* Start at offset zero with no pending fields.  */
  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = NULL;
  rli->packed_maybe_necessary = 0;
  rli->remaining_in_alignment = 0;

  return rli;
}
824
825 /* These four routines perform computations that convert between
826 the offset/bitpos forms and byte and bit offsets. */
827
828 tree
829 bit_from_pos (tree offset, tree bitpos)
830 {
831 return size_binop (PLUS_EXPR, bitpos,
832 size_binop (MULT_EXPR,
833 fold_convert (bitsizetype, offset),
834 bitsize_unit_node));
835 }
836
837 tree
838 byte_from_pos (tree offset, tree bitpos)
839 {
840 return size_binop (PLUS_EXPR, offset,
841 fold_convert (sizetype,
842 size_binop (TRUNC_DIV_EXPR, bitpos,
843 bitsize_unit_node)));
844 }
845
846 void
847 pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
848 tree pos)
849 {
850 *poffset = size_binop (MULT_EXPR,
851 fold_convert (sizetype,
852 size_binop (FLOOR_DIV_EXPR, pos,
853 bitsize_int (off_align))),
854 size_int (off_align / BITS_PER_UNIT));
855 *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, bitsize_int (off_align));
856 }
857
858 /* Given a pointer to bit and byte offsets and an offset alignment,
859 normalize the offsets so they are within the alignment. */
860
861 void
862 normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
863 {
864 /* If the bit position is now larger than it should be, adjust it
865 downwards. */
866 if (compare_tree_int (*pbitpos, off_align) >= 0)
867 {
868 tree extra_aligns = size_binop (FLOOR_DIV_EXPR, *pbitpos,
869 bitsize_int (off_align));
870
871 *poffset
872 = size_binop (PLUS_EXPR, *poffset,
873 size_binop (MULT_EXPR,
874 fold_convert (sizetype, extra_aligns),
875 size_int (off_align / BITS_PER_UNIT)));
876
877 *pbitpos
878 = size_binop (FLOOR_MOD_EXPR, *pbitpos, bitsize_int (off_align));
879 }
880 }
881
/* Print debugging information about the information in RLI.
   Intended to be called from a debugger; not used by the compiler
   itself.  Output goes to stderr.  */

DEBUG_FUNCTION void
debug_rli (record_layout_info rli)
{
  print_node_brief (stderr, "type", rli->t, 0);
  print_node_brief (stderr, "\noffset", rli->offset, 0);
  print_node_brief (stderr, " bitpos", rli->bitpos, 0);

  fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
	   rli->record_align, rli->unpacked_align,
	   rli->offset_align);

  /* The ms_struct code is the only that uses this.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment);

  if (rli->packed_maybe_necessary)
    fprintf (stderr, "packed may be necessary\n");

  if (!VEC_empty (tree, rli->pending_statics))
    {
      fprintf (stderr, "pending statics:\n");
      debug_vec_tree (rli->pending_statics);
    }
}
908
/* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
   BITPOS if necessary to keep BITPOS below OFFSET_ALIGN.  */

void
normalize_rli (record_layout_info rli)
{
  /* Delegate to the generic offset/bitpos normalizer.  */
  normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}
917
/* Returns the size in bytes allocated so far.  */

tree
rli_size_unit_so_far (record_layout_info rli)
{
  /* Combine the byte offset and bit position into whole bytes.  */
  return byte_from_pos (rli->offset, rli->bitpos);
}
925
/* Returns the size in bits allocated so far.  */

tree
rli_size_so_far (record_layout_info rli)
{
  /* Combine the byte offset and bit position into a total bit count.  */
  return bit_from_pos (rli->offset, rli->bitpos);
}
933
/* FIELD is about to be added to RLI->T.  The alignment (in bits) of
   the next available location within the record is given by KNOWN_ALIGN.
   Update the variable alignment fields in RLI, and return the alignment
   to give the FIELD.  */

unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
			    unsigned int known_align)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);
  /* True if the field was explicitly aligned by the user.  */
  bool user_align;
  bool is_bitfield;

  /* Do not attempt to align an ERROR_MARK node.  */
  if (TREE_CODE (type) == ERROR_MARK)
    return 0;

  /* Lay out the field so we know what alignment it needs.  */
  layout_decl (field, known_align);
  desired_align = DECL_ALIGN (field);
  user_align = DECL_USER_ALIGN (field);

  /* A zero-size bit-field is not treated as a bitfield for alignment
     purposes here; it only matters in the ms_bitfield case below.  */
  is_bitfield = (type != error_mark_node
		 && DECL_BIT_FIELD_TYPE (field)
		 && ! integer_zerop (TYPE_SIZE (type)));

  /* Record must have at least as much alignment as any field.
     Otherwise, the alignment of the field within the record is
     meaningless.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      /* Here, the alignment of the underlying type of a bitfield can
	 affect the alignment of a record; even a zero-sized field
	 can do this.  The alignment should be to the alignment of
	 the type, except that for zero-size bitfields this only
	 applies if there was an immediately prior, nonzero-size
	 bitfield.  (That's the way it is, experimentally.) */
      if ((!is_bitfield && !DECL_PACKED (field))
	  || (!integer_zerop (DECL_SIZE (field))
	      ? !DECL_PACKED (field)
	      : (rli->prev_field
		 && DECL_BIT_FIELD_TYPE (rli->prev_field)
		 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
	{
	  unsigned int type_align = TYPE_ALIGN (type);
	  type_align = MAX (type_align, desired_align);
	  if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);
	  rli->record_align = MAX (rli->record_align, type_align);
	  rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
	}
    }
#ifdef PCC_BITFIELD_TYPE_MATTERS
  else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
    {
      /* Named bit-fields cause the entire structure to have the
	 alignment implied by their type.  Some targets also apply the same
	 rules to unnamed bitfields.  */
      if (DECL_NAME (field) != 0
	  || targetm.align_anon_bitfield ())
	{
	  unsigned int type_align = TYPE_ALIGN (type);

#ifdef ADJUST_FIELD_ALIGN
	  if (! TYPE_USER_ALIGN (type))
	    type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

	  /* Targets might choose to handle unnamed and hence possibly
	     zero-width bitfield.  Those are not influenced by #pragmas
	     or packed attributes.  */
	  if (integer_zerop (DECL_SIZE (field)))
	    {
	      if (initial_max_fld_align)
		type_align = MIN (type_align,
				  initial_max_fld_align * BITS_PER_UNIT);
	    }
	  else if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);
	  else if (DECL_PACKED (field))
	    type_align = MIN (type_align, BITS_PER_UNIT);

	  /* The alignment of the record is increased to the maximum
	     of the current alignment, the alignment indicated on the
	     field (i.e., the alignment specified by an __aligned__
	     attribute), and the alignment indicated by the type of
	     the field.  */
	  rli->record_align = MAX (rli->record_align, desired_align);
	  rli->record_align = MAX (rli->record_align, type_align);

	  if (warn_packed)
	    rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
	  user_align |= TYPE_USER_ALIGN (type);
	}
    }
#endif
  else
    {
      /* Default case: the record is aligned to the strictest field.  */
      rli->record_align = MAX (rli->record_align, desired_align);
      rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
    }

  TYPE_USER_ALIGN (rli->t) |= user_align;

  return desired_align;
}
1044
1045 /* Called from place_field to handle unions. */
1046
1047 static void
1048 place_union_field (record_layout_info rli, tree field)
1049 {
1050 update_alignment_for_field (rli, field, /*known_align=*/0);
1051
1052 DECL_FIELD_OFFSET (field) = size_zero_node;
1053 DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
1054 SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);
1055
1056 /* If this is an ERROR_MARK return *after* having set the
1057 field at the start of the union. This helps when parsing
1058 invalid fields. */
1059 if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
1060 return;
1061
1062 /* We assume the union's size will be a multiple of a byte so we don't
1063 bother with BITPOS. */
1064 if (TREE_CODE (rli->t) == UNION_TYPE)
1065 rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
1066 else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
1067 rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field),
1068 DECL_SIZE_UNIT (field), rli->offset);
1069 }
1070
1071 #if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
1072 /* A bitfield of SIZE with a required access alignment of ALIGN is allocated
1073 at BYTE_OFFSET / BIT_OFFSET. Return nonzero if the field would span more
1074 units of alignment than the underlying TYPE. */
1075 static int
1076 excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
1077 HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
1078 {
1079 /* Note that the calculation of OFFSET might overflow; we calculate it so
1080 that we still get the right result as long as ALIGN is a power of two. */
1081 unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;
1082
1083 offset = offset % align;
1084 return ((offset + size + align - 1) / align
1085 > ((unsigned HOST_WIDE_INT) tree_low_cst (TYPE_SIZE (type), 1)
1086 / align));
1087 }
1088 #endif
1089
/* RLI contains information about the layout of a RECORD_TYPE.  FIELD
   is a FIELD_DECL to be added after those fields already present in
   T.  (FIELD is not actually added to the TYPE_FIELDS list here;
   callers that desire that behavior must manually perform that step.)  */

void
place_field (record_layout_info rli, tree field)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The alignment FIELD would have if we just dropped it into the
     record as it presently stands.  */
  unsigned int known_align;
  unsigned int actual_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);

  gcc_assert (TREE_CODE (field) != ERROR_MARK);

  /* If FIELD is static, then treat it like a separate variable, not
     really like a structure field.  If it is a FUNCTION_DECL, it's a
     method.  In both cases, all we do is lay out the decl, and we do
     it *after* the record is laid out.  */
  if (TREE_CODE (field) == VAR_DECL)
    {
      VEC_safe_push (tree, gc, rli->pending_statics, field);
      return;
    }

  /* Enumerators and enum types which are local to this class need not
     be laid out.  Likewise for initialized constant fields.  */
  else if (TREE_CODE (field) != FIELD_DECL)
    return;

  /* Unions are laid out very differently than records, so split
     that code off to another function.  */
  else if (TREE_CODE (rli->t) != RECORD_TYPE)
    {
      place_union_field (rli, field);
      return;
    }

  else if (TREE_CODE (type) == ERROR_MARK)
    {
      /* Place this field at the current allocation position, so we
	 maintain monotonicity.  */
      DECL_FIELD_OFFSET (field) = rli->offset;
      DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
      SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
      return;
    }

  /* Work out the known alignment so far.  Note that A & (-A) is the
     value of the least-significant bit in A that is one.  */
  if (! integer_zerop (rli->bitpos))
    known_align = (tree_low_cst (rli->bitpos, 1)
		   & - tree_low_cst (rli->bitpos, 1));
  else if (integer_zerop (rli->offset))
    known_align = 0;
  else if (host_integerp (rli->offset, 1))
    known_align = (BITS_PER_UNIT
		   * (tree_low_cst (rli->offset, 1)
		      & - tree_low_cst (rli->offset, 1)));
  else
    known_align = rli->offset_align;

  desired_align = update_alignment_for_field (rli, field, known_align);
  if (known_align == 0)
    known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);

  if (warn_packed && DECL_PACKED (field))
    {
      if (known_align >= TYPE_ALIGN (type))
	{
	  if (TYPE_ALIGN (type) > desired_align)
	    {
	      if (STRICT_ALIGNMENT)
		warning (OPT_Wattributes, "packed attribute causes "
			 "inefficient alignment for %q+D", field);
	      /* Don't warn if DECL_PACKED was set by the type.  */
	      else if (!TYPE_PACKED (rli->t))
		warning (OPT_Wattributes, "packed attribute is "
			 "unnecessary for %q+D", field);
	    }
	}
      else
	rli->packed_maybe_necessary = 1;
    }

  /* Does this field automatically have alignment it needs by virtue
     of the fields that precede it and the record's own alignment?
     We already align ms_struct fields, so don't re-align them.  */
  if (known_align < desired_align
      && !targetm.ms_bitfield_layout_p (rli->t))
    {
      /* No, we need to skip space before this field.
	 Bump the cumulative size to multiple of field alignment.  */

      if (DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION)
	warning (OPT_Wpadded, "padding struct to align %q+D", field);

      /* If the alignment is still within offset_align, just align
	 the bit position.  */
      if (desired_align < rli->offset_align)
	rli->bitpos = round_up (rli->bitpos, desired_align);
      else
	{
	  /* First adjust OFFSET by the partial bits, then align.  */
	  rli->offset
	    = size_binop (PLUS_EXPR, rli->offset,
			  fold_convert (sizetype,
					size_binop (CEIL_DIV_EXPR, rli->bitpos,
						    bitsize_unit_node)));
	  rli->bitpos = bitsize_zero_node;

	  rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
	}

      /* A variable-sized offset means we only know the alignment, not
	 the exact position; record the alignment we just created.  */
      if (! TREE_CONSTANT (rli->offset))
	rli->offset_align = desired_align;

    }

  /* Handle compatibility with PCC.  Note that if the record has any
     variable-sized fields, we need not worry about compatibility.  */
#ifdef PCC_BITFIELD_TYPE_MATTERS
  if (PCC_BITFIELD_TYPE_MATTERS
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD (field)
      && (! DECL_PACKED (field)
	  /* Enter for these packed fields only to issue a warning.  */
	  || TYPE_ALIGN (type) <= BITS_PER_UNIT)
      && maximum_field_alignment == 0
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
	type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      /* A bit field may not span more units of alignment of its type
	 than its type itself.  Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
	{
	  if (DECL_PACKED (field))
	    {
	      /* Packed fields are left where they are, but the layout
		 changed relative to GCC <= 4.3; tell the user.  */
	      if (warn_packed_bitfield_compat == 1)
		inform
		  (input_location,
		   "offset of packed bit-field %qD has changed in GCC 4.4",
		   field);
	    }
	  else
	    rli->bitpos = round_up (rli->bitpos, type_align);
	}

      if (! DECL_PACKED (field))
	TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

#ifdef BITFIELD_NBYTES_LIMITED
  if (BITFIELD_NBYTES_LIMITED
      && ! targetm.ms_bitfield_layout_p (rli->t)
      && TREE_CODE (field) == FIELD_DECL
      && type != error_mark_node
      && DECL_BIT_FIELD_TYPE (field)
      && ! DECL_PACKED (field)
      && ! integer_zerop (DECL_SIZE (field))
      && host_integerp (DECL_SIZE (field), 1)
      && host_integerp (rli->offset, 1)
      && host_integerp (TYPE_SIZE (type), 1))
    {
      unsigned int type_align = TYPE_ALIGN (type);
      tree dsize = DECL_SIZE (field);
      HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
      HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
      HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);

#ifdef ADJUST_FIELD_ALIGN
      if (! TYPE_USER_ALIGN (type))
	type_align = ADJUST_FIELD_ALIGN (field, type_align);
#endif

      if (maximum_field_alignment != 0)
	type_align = MIN (type_align, maximum_field_alignment);
      /* ??? This test is opposite the test in the containing if
	 statement, so this code is unreachable currently.  */
      else if (DECL_PACKED (field))
	type_align = MIN (type_align, BITS_PER_UNIT);

      /* A bit field may not span the unit of alignment of its type.
	 Advance to next boundary if necessary.  */
      if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
	rli->bitpos = round_up (rli->bitpos, type_align);

      TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
    }
#endif

  /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
     A subtlety:
	When a bit field is inserted into a packed record, the whole
	size of the underlying type is used by one or more same-size
	adjacent bitfields.  (That is, if its long:3, 32 bits is
	used in the record, and any additional adjacent long bitfields are
	packed into the same chunk of 32 bits.  However, if the size
	changes, a new field of that size is allocated.)  In an unpacked
	record, this is the same as using alignment, but not equivalent
	when packing.

     Note: for compatibility, we use the type size, not the type alignment
     to determine alignment, since that matches the documentation */

  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      tree prev_saved = rli->prev_field;
      tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;

      /* This is a bitfield if it exists.  */
      if (rli->prev_field)
	{
	  /* If both are bitfields, nonzero, and the same size, this is
	     the middle of a run.  Zero declared size fields are special
	     and handled as "end of run". (Note: it's nonzero declared
	     size, but equal type sizes!) (Since we know that both
	     the current and previous fields are bitfields by the
	     time we check it, DECL_SIZE must be present for both.) */
	  if (DECL_BIT_FIELD_TYPE (field)
	      && !integer_zerop (DECL_SIZE (field))
	      && !integer_zerop (DECL_SIZE (rli->prev_field))
	      && host_integerp (DECL_SIZE (rli->prev_field), 0)
	      && host_integerp (TYPE_SIZE (type), 0)
	      && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
	    {
	      /* We're in the middle of a run of equal type size fields; make
		 sure we realign if we run out of bits.  (Not decl size,
		 type size!) */
	      HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1);

	      if (rli->remaining_in_alignment < bitsize)
		{
		  HOST_WIDE_INT typesize = tree_low_cst (TYPE_SIZE (type), 1);

		  /* out of bits; bump up to next 'word'.  */
		  rli->bitpos
		    = size_binop (PLUS_EXPR, rli->bitpos,
				  bitsize_int (rli->remaining_in_alignment));
		  rli->prev_field = field;
		  if (typesize < bitsize)
		    rli->remaining_in_alignment = 0;
		  else
		    rli->remaining_in_alignment = typesize - bitsize;
		}
	      else
		rli->remaining_in_alignment -= bitsize;
	    }
	  else
	    {
	      /* End of a run: if leaving a run of bitfields of the same type
		 size, we have to "use up" the rest of the bits of the type
		 size.

		 Compute the new position as the sum of the size for the prior
		 type and where we first started working on that type.
		 Note: since the beginning of the field was aligned then
		 of course the end will be too.  No round needed.  */

	      if (!integer_zerop (DECL_SIZE (rli->prev_field)))
		{
		  rli->bitpos
		    = size_binop (PLUS_EXPR, rli->bitpos,
				  bitsize_int (rli->remaining_in_alignment));
		}
	      else
		/* We "use up" size zero fields; the code below should behave
		   as if the prior field was not a bitfield.  */
		prev_saved = NULL;

	      /* Cause a new bitfield to be captured, either this time (if
		 currently a bitfield) or next time we see one.  */
	      if (!DECL_BIT_FIELD_TYPE(field)
		  || integer_zerop (DECL_SIZE (field)))
		rli->prev_field = NULL;
	    }

	  normalize_rli (rli);
	}

      /* If we're starting a new run of same size type bitfields
	 (or a run of non-bitfields), set up the "first of the run"
	 fields.

	 That is, if the current field is not a bitfield, or if there
	 was a prior bitfield the type sizes differ, or if there wasn't
	 a prior bitfield the size of the current field is nonzero.

	 Note: we must be sure to test ONLY the type size if there was
	 a prior bitfield and ONLY for the current field being zero if
	 there wasn't.  */

      if (!DECL_BIT_FIELD_TYPE (field)
	  || (prev_saved != NULL
	      ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
	      : !integer_zerop (DECL_SIZE (field)) ))
	{
	  /* Never smaller than a byte for compatibility.  */
	  unsigned int type_align = BITS_PER_UNIT;

	  /* (When not a bitfield), we could be seeing a flex array (with
	     no DECL_SIZE).  Since we won't be using remaining_in_alignment
	     until we see a bitfield (and come by here again) we just skip
	     calculating it.  */
	  if (DECL_SIZE (field) != NULL
	      && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 1)
	      && host_integerp (DECL_SIZE (field), 1))
	    {
	      unsigned HOST_WIDE_INT bitsize
		= tree_low_cst (DECL_SIZE (field), 1);
	      unsigned HOST_WIDE_INT typesize
		= tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 1);

	      if (typesize < bitsize)
		rli->remaining_in_alignment = 0;
	      else
		rli->remaining_in_alignment = typesize - bitsize;
	    }

	  /* Now align (conventionally) for the new type.  */
	  type_align = TYPE_ALIGN (TREE_TYPE (field));

	  if (maximum_field_alignment != 0)
	    type_align = MIN (type_align, maximum_field_alignment);

	  rli->bitpos = round_up (rli->bitpos, type_align);

	  /* If we really aligned, don't allow subsequent bitfields
	     to undo that.  */
	  rli->prev_field = NULL;
	}
    }

  /* Offset so far becomes the position of this field after normalizing.  */
  normalize_rli (rli);
  DECL_FIELD_OFFSET (field) = rli->offset;
  DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
  SET_DECL_OFFSET_ALIGN (field, rli->offset_align);

  /* If this field ended up more aligned than we thought it would be (we
     approximate this by seeing if its position changed), lay out the field
     again; perhaps we can use an integral mode for it now.  */
  if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
    actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
		    & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
  else if (integer_zerop (DECL_FIELD_OFFSET (field)))
    actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
  else if (host_integerp (DECL_FIELD_OFFSET (field), 1))
    actual_align = (BITS_PER_UNIT
		    * (tree_low_cst (DECL_FIELD_OFFSET (field), 1)
		       & - tree_low_cst (DECL_FIELD_OFFSET (field), 1)));
  else
    actual_align = DECL_OFFSET_ALIGN (field);
  /* ACTUAL_ALIGN is still the actual alignment *within the record* .
     store / extract bit field operations will check the alignment of the
     record against the mode of bit fields.  */

  if (known_align != actual_align)
    layout_decl (field, actual_align);

  /* Remember the first bitfield of a run so the ms_bitfield logic above
     can detect run boundaries on the next field.  */
  if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
    rli->prev_field = field;

  /* Now add size of this field to the size of the record.  If the size is
     not constant, treat the field as being a multiple of bytes and just
     adjust the offset, resetting the bit position.  Otherwise, apportion the
     size amongst the bit position and offset.  First handle the case of an
     unspecified size, which can happen when we have an invalid nested struct
     definition, such as struct j { struct j { int i; } }.  The error message
     is printed in finish_struct.  */
  if (DECL_SIZE (field) == 0)
    /* Do nothing.  */;
  else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
	   || TREE_OVERFLOW (DECL_SIZE (field)))
    {
      rli->offset
	= size_binop (PLUS_EXPR, rli->offset,
		      fold_convert (sizetype,
				    size_binop (CEIL_DIV_EXPR, rli->bitpos,
						bitsize_unit_node)));
      rli->offset
	= size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
      rli->bitpos = bitsize_zero_node;
      rli->offset_align = MIN (rli->offset_align, desired_align);
    }
  else if (targetm.ms_bitfield_layout_p (rli->t))
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));

      /* If we ended a bitfield before the full length of the type then
	 pad the struct out to the full length of the last type.  */
      if ((DECL_CHAIN (field) == NULL
	   || TREE_CODE (DECL_CHAIN (field)) != FIELD_DECL)
	  && DECL_BIT_FIELD_TYPE (field)
	  && !integer_zerop (DECL_SIZE (field)))
	rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
				  bitsize_int (rli->remaining_in_alignment));

      normalize_rli (rli);
    }
  else
    {
      rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
      normalize_rli (rli);
    }
}
1516
/* Assuming that all the fields have been laid out, this function uses
   RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
   indicated by RLI.  */

static void
finalize_record_size (record_layout_info rli)
{
  tree unpadded_size, unpadded_size_unit;

  /* Now we want just byte and bit offsets, so set the offset alignment
     to be a byte and then normalize.  */
  rli->offset_align = BITS_PER_UNIT;
  normalize_rli (rli);

  /* Determine the desired alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (rli->t) = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
					  rli->record_align);
#else
  TYPE_ALIGN (rli->t) = MAX (TYPE_ALIGN (rli->t), rli->record_align);
#endif

  /* Compute the size so far.  Be sure to allow for extra bits in the
     size in bytes.  We have guaranteed above that it will be no more
     than a single byte.  */
  unpadded_size = rli_size_so_far (rli);
  unpadded_size_unit = rli_size_unit_so_far (rli);
  if (! integer_zerop (rli->bitpos))
    unpadded_size_unit
      = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);

  /* Round the size up to be a multiple of the required alignment.  */
  TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
  TYPE_SIZE_UNIT (rli->t)
    = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));

  /* Warn if rounding changed the size; skip compiler-generated types.  */
  if (TREE_CONSTANT (unpadded_size)
      && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
      && input_location != BUILTINS_LOCATION)
    warning (OPT_Wpadded, "padding struct size to alignment boundary");

  if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
      && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
      && TREE_CONSTANT (unpadded_size))
    {
      tree unpacked_size;

      /* Compute what the record's alignment would have been without
	 the packed attribute, then see whether packing actually
	 changed the layout.  */
#ifdef ROUND_TYPE_ALIGN
      rli->unpacked_align
	= ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
#else
      rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
#endif

      unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
      if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
	{
	  if (TYPE_NAME (rli->t))
	    {
	      tree name;

	      if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
		name = TYPE_NAME (rli->t);
	      else
		name = DECL_NAME (TYPE_NAME (rli->t));

	      if (STRICT_ALIGNMENT)
		warning (OPT_Wpacked, "packed attribute causes inefficient "
			 "alignment for %qE", name);
	      else
		warning (OPT_Wpacked,
			 "packed attribute is unnecessary for %qE", name);
	    }
	  else
	    {
	      if (STRICT_ALIGNMENT)
		warning (OPT_Wpacked,
			 "packed attribute causes inefficient alignment");
	      else
		warning (OPT_Wpacked, "packed attribute is unnecessary");
	    }
	}
    }
}
1601
/* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE).  */

void
compute_record_mode (tree type)
{
  tree field;
  enum machine_mode mode = VOIDmode;

  /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
     However, if possible, we use a mode that fits in a register
     instead, in order to allow for better optimization down the
     line.  */
  SET_TYPE_MODE (type, BLKmode);

  /* A variable-sized or huge record must stay BLKmode.  */
  if (! host_integerp (TYPE_SIZE (type), 1))
    return;

  /* A record which has any BLKmode members must itself be
     BLKmode; it can't go in a register.  Unless the member is
     BLKmode only because it isn't aligned.  */
  for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
    {
      if (TREE_CODE (field) != FIELD_DECL)
	continue;

      if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
	  || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
	      && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
	      && !(TYPE_SIZE (TREE_TYPE (field)) != 0
		   && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
	  || ! host_integerp (bit_position (field), 1)
	  || DECL_SIZE (field) == 0
	  || ! host_integerp (DECL_SIZE (field), 1))
	return;

      /* If this field is the whole struct, remember its mode so
	 that, say, we can put a double in a class into a DF
	 register instead of forcing it to live in the stack.  */
      if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field)))
	mode = DECL_MODE (field);

#ifdef MEMBER_TYPE_FORCES_BLK
      /* With some targets, eg. c4x, it is sub-optimal
	 to access an aligned BLKmode structure as a scalar.  */

      if (MEMBER_TYPE_FORCES_BLK (field, mode))
	return;
#endif /* MEMBER_TYPE_FORCES_BLK */
    }

  /* If we only have one real field; use its mode if that mode's size
     matches the type's size.  This only applies to RECORD_TYPE.  This
     does not apply to unions.  */
  if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
      && host_integerp (TYPE_SIZE (type), 1)
      && GET_MODE_BITSIZE (mode) == TREE_INT_CST_LOW (TYPE_SIZE (type)))
    SET_TYPE_MODE (type, mode);
  else
    SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1));

  /* If structure's known alignment is less than what the scalar
     mode would need, and it matters, then stick with BLKmode.  */
  if (TYPE_MODE (type) != BLKmode
      && STRICT_ALIGNMENT
      && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
	    || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (TYPE_MODE (type))))
    {
      /* If this is the only reason this type is BLKmode, then
	 don't force containing types to be BLKmode.  */
      TYPE_NO_FORCE_BLK (type) = 1;
      SET_TYPE_MODE (type, BLKmode);
    }
}
1675
/* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
   out.  */

static void
finalize_type_size (tree type)
{
  /* Normally, use the alignment corresponding to the mode chosen.
     However, where strict alignment is not required, avoid
     over-aligning structures, since most compilers do not do this
     alignment.  */

  if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode
      && (STRICT_ALIGNMENT
	  || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE
	      && TREE_CODE (type) != QUAL_UNION_TYPE
	      && TREE_CODE (type) != ARRAY_TYPE)))
    {
      unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));

      /* Don't override a larger alignment requirement coming from a user
	 alignment of one of the fields.  */
      if (mode_align >= TYPE_ALIGN (type))
	{
	  TYPE_ALIGN (type) = mode_align;
	  TYPE_USER_ALIGN (type) = 0;
	}
    }

  /* Do machine-dependent extra alignment.  */
#ifdef ROUND_TYPE_ALIGN
  TYPE_ALIGN (type)
    = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT);
#endif

  /* If we failed to find a simple way to calculate the unit size
     of the type, find it by division.  */
  if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
    /* TYPE_SIZE (type) is computed in bitsizetype.  After the division, the
       result will fit in sizetype.  We will get more efficient code using
       sizetype, so we force a conversion.  */
    TYPE_SIZE_UNIT (type)
      = fold_convert (sizetype,
		      size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
				  bitsize_unit_node));

  if (TYPE_SIZE (type) != 0)
    {
      /* Round both size forms up to the final alignment.  */
      TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
      TYPE_SIZE_UNIT (type)
	= round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type));
    }

  /* Evaluate nonconstant sizes only once, either now or as soon as safe.  */
  if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
    TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
  if (TYPE_SIZE_UNIT (type) != 0
      && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
    TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));

  /* Also layout any other variants of the type.  */
  if (TYPE_NEXT_VARIANT (type)
      || type != TYPE_MAIN_VARIANT (type))
    {
      tree variant;
      /* Record layout info of this variant.  */
      tree size = TYPE_SIZE (type);
      tree size_unit = TYPE_SIZE_UNIT (type);
      unsigned int align = TYPE_ALIGN (type);
      unsigned int user_align = TYPE_USER_ALIGN (type);
      enum machine_mode mode = TYPE_MODE (type);

      /* Copy it into all variants.  */
      for (variant = TYPE_MAIN_VARIANT (type);
	   variant != 0;
	   variant = TYPE_NEXT_VARIANT (variant))
	{
	  TYPE_SIZE (variant) = size;
	  TYPE_SIZE_UNIT (variant) = size_unit;
	  TYPE_ALIGN (variant) = align;
	  TYPE_USER_ALIGN (variant) = user_align;
	  SET_TYPE_MODE (variant, mode);
	}
    }
}
1760
1761 /* Do all of the work required to layout the type indicated by RLI,
1762 once the fields have been laid out. This function will call `free'
1763 for RLI, unless FREE_P is false. Passing a value other than false
1764 for FREE_P is bad practice; this option only exists to support the
1765 G++ 3.2 ABI. */
1766
1767 void
1768 finish_record_layout (record_layout_info rli, int free_p)
1769 {
1770 tree variant;
1771
1772 /* Compute the final size. */
1773 finalize_record_size (rli);
1774
1775 /* Compute the TYPE_MODE for the record. */
1776 compute_record_mode (rli->t);
1777
1778 /* Perform any last tweaks to the TYPE_SIZE, etc. */
1779 finalize_type_size (rli->t);
1780
1781 /* Propagate TYPE_PACKED to variants. With C++ templates,
1782 handle_packed_attribute is too early to do this. */
1783 for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
1784 variant = TYPE_NEXT_VARIANT (variant))
1785 TYPE_PACKED (variant) = TYPE_PACKED (rli->t);
1786
1787 /* Lay out any static members. This is done now because their type
1788 may use the record's type. */
1789 while (!VEC_empty (tree, rli->pending_statics))
1790 layout_decl (VEC_pop (tree, rli->pending_statics), 0);
1791
1792 /* Clean up. */
1793 if (free_p)
1794 {
1795 VEC_free (tree, gc, rli->pending_statics);
1796 free (rli);
1797 }
1798 }
1799 \f
1800
1801 /* Finish processing a builtin RECORD_TYPE type TYPE. It's name is
1802 NAME, its fields are chained in reverse on FIELDS.
1803
1804 If ALIGN_TYPE is non-null, it is given the same alignment as
1805 ALIGN_TYPE. */
1806
1807 void
1808 finish_builtin_struct (tree type, const char *name, tree fields,
1809 tree align_type)
1810 {
1811 tree tail, next;
1812
1813 for (tail = NULL_TREE; fields; tail = fields, fields = next)
1814 {
1815 DECL_FIELD_CONTEXT (fields) = type;
1816 next = DECL_CHAIN (fields);
1817 DECL_CHAIN (fields) = tail;
1818 }
1819 TYPE_FIELDS (type) = tail;
1820
1821 if (align_type)
1822 {
1823 TYPE_ALIGN (type) = TYPE_ALIGN (align_type);
1824 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
1825 }
1826
1827 layout_type (type);
1828 #if 0 /* not yet, should get fixed properly later */
1829 TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
1830 #else
1831 TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
1832 TYPE_DECL, get_identifier (name), type);
1833 #endif
1834 TYPE_STUB_DECL (type) = TYPE_NAME (type);
1835 layout_decl (TYPE_NAME (type), 0);
1836 }
1837
/* Calculate the mode, size, and alignment for TYPE.
   For an array type, calculate the element separation as well.
   Record TYPE on the chain of permanent or temporary types
   so that dbxout will find out about it.

   TYPE_SIZE of a type is nonzero if the type has been laid out already.
   layout_type does nothing on such a type.

   If the type is incomplete, its TYPE_SIZE remains zero.  */

void
layout_type (tree type)
{
  gcc_assert (type);

  if (type == error_mark_node)
    return;

  /* Do nothing if type has been laid out before.  */
  if (TYPE_SIZE (type))
    return;

  switch (TREE_CODE (type))
    {
    case LANG_TYPE:
      /* This kind of type is the responsibility
	 of the language-specific code.  */
      gcc_unreachable ();

    case BOOLEAN_TYPE:  /* Used for Java, Pascal, and Chill.  */
      if (TYPE_PRECISION (type) == 0)
	TYPE_PRECISION (type) = 1; /* default to one byte/boolean.  */

      /* ... fall through ...  */

    case INTEGER_TYPE:
    case ENUMERAL_TYPE:
      /* A type whose minimum value is a nonnegative constant cannot
	 hold negative values, so treat it as unsigned.  */
      if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
	  && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
	TYPE_UNSIGNED (type) = 1;

      /* Size and alignment come from the narrowest integer mode that
	 can hold TYPE_PRECISION bits.  */
      SET_TYPE_MODE (type,
		     smallest_mode_for_size (TYPE_PRECISION (type), MODE_INT));
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case REAL_TYPE:
      SET_TYPE_MODE (type,
		     mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0));
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case FIXED_POINT_TYPE:
      /* TYPE_MODE (type) has been set already.  */
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case COMPLEX_TYPE:
      /* A complex type is twice the precision of its component type;
	 signedness follows the component type.  */
      TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
      SET_TYPE_MODE (type,
		     mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
				    (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
				     ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
				     0));
      TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
      TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
      break;

    case VECTOR_TYPE:
      {
	int nunits = TYPE_VECTOR_SUBPARTS (type);
	tree innertype = TREE_TYPE (type);

	/* The number of vector subparts must be a power of two.  */
	gcc_assert (!(nunits & (nunits - 1)));

	/* Find an appropriate mode for the vector type.  */
	if (TYPE_MODE (type) == VOIDmode)
	  SET_TYPE_MODE (type,
			 mode_for_vector (TYPE_MODE (innertype), nunits));

	TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
        TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
	/* Total size is element size times the number of elements.  */
	TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
					         TYPE_SIZE_UNIT (innertype),
					         size_int (nunits), 0);
	TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
					    bitsize_int (nunits), 0);

	/* Always naturally align vectors.  This prevents ABI changes
	   depending on whether or not native vector modes are supported.  */
	TYPE_ALIGN (type) = tree_low_cst (TYPE_SIZE (type), 0);
        break;
      }

    case VOID_TYPE:
      /* This is an incomplete type and so doesn't have a size.  */
      TYPE_ALIGN (type) = 1;
      TYPE_USER_ALIGN (type) = 0;
      SET_TYPE_MODE (type, VOIDmode);
      break;

    case OFFSET_TYPE:
      /* A pointer-to-member offset has the size of a pointer.  */
      TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
      TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
      /* A pointer might be MODE_PARTIAL_INT,
	 but ptrdiff_t must be integral.  */
      SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0));
      TYPE_PRECISION (type) = POINTER_SIZE;
      break;

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      /* It's hard to see what the mode and size of a function ought to
	 be, but we do know the alignment is FUNCTION_BOUNDARY, so
	 make it consistent with that.  */
      SET_TYPE_MODE (type, mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0));
      TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
      TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
      break;

    case POINTER_TYPE:
    case REFERENCE_TYPE:
      {
	enum machine_mode mode = TYPE_MODE (type);
	/* Internal reference types are sized by the address space's
	   address mode rather than the pointer mode.  */
	if (TREE_CODE (type) == REFERENCE_TYPE && reference_types_internal)
	  {
	    addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (type));
	    mode = targetm.addr_space.address_mode (as);
	  }

	TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
	TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
	TYPE_UNSIGNED (type) = 1;
	TYPE_PRECISION (type) = GET_MODE_BITSIZE (mode);
      }
      break;

    case ARRAY_TYPE:
      {
	tree index = TYPE_DOMAIN (type);
	tree element = TREE_TYPE (type);

	/* Make sure a pointer to the element type exists; presumably
	   so later consumers find it already built -- NOTE(review):
	   the return value is deliberately unused.  */
	build_pointer_type (element);

	/* We need to know both bounds in order to compute the size.  */
	if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
	    && TYPE_SIZE (element))
	  {
	    tree ub = TYPE_MAX_VALUE (index);
	    tree lb = TYPE_MIN_VALUE (index);
	    tree element_size = TYPE_SIZE (element);
	    tree length;

	    /* Make sure that an array of zero-sized element is zero-sized
	       regardless of its extent.  */
	    if (integer_zerop (element_size))
	      length = size_zero_node;

	    /* The initial subtraction should happen in the original type so
	       that (possible) negative values are handled appropriately.  */
	    else
	      length
		= size_binop (PLUS_EXPR, size_one_node,
			      fold_convert (sizetype,
					    fold_build2 (MINUS_EXPR,
							 TREE_TYPE (lb),
							 ub, lb)));

	    /* Size in bits: element bit size times element count.  */
	    TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
					   fold_convert (bitsizetype,
							 length));

	    /* If we know the size of the element, calculate the total size
	       directly, rather than do some division thing below.  This
	       optimization helps Fortran assumed-size arrays (where the
	       size of the array is determined at runtime) substantially.  */
	    if (TYPE_SIZE_UNIT (element))
	      TYPE_SIZE_UNIT (type)
		= size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
	  }

	/* Now round the alignment and size,
	   using machine-dependent criteria if any.  */

#ifdef ROUND_TYPE_ALIGN
	TYPE_ALIGN (type)
	  = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT);
#else
	TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
#endif
	if (!TYPE_SIZE (element))
	  /* We don't know the size of the underlying element type, so
	     our alignment calculations will be wrong, forcing us to
	     fall back on structural equality. */
	  SET_TYPE_STRUCTURAL_EQUALITY (type);
	TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
	SET_TYPE_MODE (type, BLKmode);
	if (TYPE_SIZE (type) != 0
#ifdef MEMBER_TYPE_FORCES_BLK
	    && ! MEMBER_TYPE_FORCES_BLK (type, VOIDmode)
#endif
	    /* BLKmode elements force BLKmode aggregate;
	       else extract/store fields may lose.  */
	    && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
		|| TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
	  {
	    /* One-element arrays get the component type's mode.  */
	    if (simple_cst_equal (TYPE_SIZE (type),
				  TYPE_SIZE (TREE_TYPE (type))))
	      SET_TYPE_MODE (type, TYPE_MODE (TREE_TYPE (type)));
	    else
	      SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type),
						       MODE_INT, 1));

	    /* On strict-alignment targets, back out to BLKmode if the
	       chosen mode would require more alignment than the array
	       has; remember this so we don't retry.  */
	    if (TYPE_MODE (type) != BLKmode
		&& STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
		&& TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
	      {
		TYPE_NO_FORCE_BLK (type) = 1;
		SET_TYPE_MODE (type, BLKmode);
	      }
	  }
	/* When the element size is constant, check that it is at least as
	   large as the element alignment.  */
	if (TYPE_SIZE_UNIT (element)
	    && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
	    /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
	       TYPE_ALIGN_UNIT.  */
	    && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
	    && !integer_zerop (TYPE_SIZE_UNIT (element))
	    && compare_tree_int (TYPE_SIZE_UNIT (element),
			         TYPE_ALIGN_UNIT (element)) < 0)
	  error ("alignment of array elements is greater than element size");
	break;
      }

    case RECORD_TYPE:
    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
	tree field;
	record_layout_info rli;

	/* Initialize the layout information.  */
	rli = start_record_layout (type);

	/* If this is a QUAL_UNION_TYPE, we want to process the fields
	   in the reverse order in building the COND_EXPR that denotes
	   its size.  We reverse them again later.  */
	if (TREE_CODE (type) == QUAL_UNION_TYPE)
	  TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

	/* Place all the fields.  */
	for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
	  place_field (rli, field);

	if (TREE_CODE (type) == QUAL_UNION_TYPE)
	  TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));

	/* Finish laying out the record.  */
	finish_record_layout (rli, /*free_p=*/true);
      }
      break;

    default:
      gcc_unreachable ();
    }

  /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE.  For
     records and unions, finish_record_layout already called this
     function.  */
  if (TREE_CODE (type) != RECORD_TYPE
      && TREE_CODE (type) != UNION_TYPE
      && TREE_CODE (type) != QUAL_UNION_TYPE)
    finalize_type_size (type);

  /* We should never see alias sets on incomplete aggregates.  And we
     should not call layout_type on not incomplete aggregates.  */
  if (AGGREGATE_TYPE_P (type))
    gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
}
2122
/* Vector types need to re-check the target flags each time we report
   the machine mode.  We need to do this because attribute target can
   change the result of vector_mode_supported_p and have_regs_of_mode
   on a per-function basis.  Thus the TYPE_MODE of a VECTOR_TYPE can
   change on a per-function basis.  */
/* ??? Possibly a better solution is to run through all the types
   referenced by a function and re-compute the TYPE_MODE once, rather
   than make the TYPE_MODE macro call a function.  */

enum machine_mode
vector_type_mode (const_tree t)
{
  enum machine_mode mode;

  gcc_assert (TREE_CODE (t) == VECTOR_TYPE);

  /* Read the stored mode field directly; TYPE_MODE on a VECTOR_TYPE
     routes through this function (see comment above), so using the
     macro here would recurse.  */
  mode = t->type.mode;
  if (VECTOR_MODE_P (mode)
      && (!targetm.vector_mode_supported_p (mode)
	  || !have_regs_of_mode[mode]))
    {
      enum machine_mode innermode = TREE_TYPE (t)->type.mode;

      /* For integers, try mapping it to a same-sized scalar mode.  */
      if (GET_MODE_CLASS (innermode) == MODE_INT)
	{
	  mode = mode_for_size (TYPE_VECTOR_SUBPARTS (t)
				* GET_MODE_BITSIZE (innermode), MODE_INT, 0);

	  if (mode != VOIDmode && have_regs_of_mode[mode])
	    return mode;
	}

      /* No usable scalar fallback: treat the vector as a blob.  */
      return BLKmode;
    }

  return mode;
}
2161 \f
2162 /* Create and return a type for signed integers of PRECISION bits. */
2163
2164 tree
2165 make_signed_type (int precision)
2166 {
2167 tree type = make_node (INTEGER_TYPE);
2168
2169 TYPE_PRECISION (type) = precision;
2170
2171 fixup_signed_type (type);
2172 return type;
2173 }
2174
2175 /* Create and return a type for unsigned integers of PRECISION bits. */
2176
2177 tree
2178 make_unsigned_type (int precision)
2179 {
2180 tree type = make_node (INTEGER_TYPE);
2181
2182 TYPE_PRECISION (type) = precision;
2183
2184 fixup_unsigned_type (type);
2185 return type;
2186 }
2187 \f
2188 /* Create and return a type for fract of PRECISION bits, UNSIGNEDP,
2189 and SATP. */
2190
2191 tree
2192 make_fract_type (int precision, int unsignedp, int satp)
2193 {
2194 tree type = make_node (FIXED_POINT_TYPE);
2195
2196 TYPE_PRECISION (type) = precision;
2197
2198 if (satp)
2199 TYPE_SATURATING (type) = 1;
2200
2201 /* Lay out the type: set its alignment, size, etc. */
2202 if (unsignedp)
2203 {
2204 TYPE_UNSIGNED (type) = 1;
2205 SET_TYPE_MODE (type, mode_for_size (precision, MODE_UFRACT, 0));
2206 }
2207 else
2208 SET_TYPE_MODE (type, mode_for_size (precision, MODE_FRACT, 0));
2209 layout_type (type);
2210
2211 return type;
2212 }
2213
2214 /* Create and return a type for accum of PRECISION bits, UNSIGNEDP,
2215 and SATP. */
2216
2217 tree
2218 make_accum_type (int precision, int unsignedp, int satp)
2219 {
2220 tree type = make_node (FIXED_POINT_TYPE);
2221
2222 TYPE_PRECISION (type) = precision;
2223
2224 if (satp)
2225 TYPE_SATURATING (type) = 1;
2226
2227 /* Lay out the type: set its alignment, size, etc. */
2228 if (unsignedp)
2229 {
2230 TYPE_UNSIGNED (type) = 1;
2231 SET_TYPE_MODE (type, mode_for_size (precision, MODE_UACCUM, 0));
2232 }
2233 else
2234 SET_TYPE_MODE (type, mode_for_size (precision, MODE_ACCUM, 0));
2235 layout_type (type);
2236
2237 return type;
2238 }
2239
2240 /* Initialize sizetype and bitsizetype to a reasonable and temporary
2241 value to enable integer types to be created. */
2242
2243 void
2244 initialize_sizetypes (void)
2245 {
2246 tree t = make_node (INTEGER_TYPE);
2247 int precision = GET_MODE_BITSIZE (SImode);
2248
2249 SET_TYPE_MODE (t, SImode);
2250 TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode);
2251 TYPE_IS_SIZETYPE (t) = 1;
2252 TYPE_UNSIGNED (t) = 1;
2253 TYPE_SIZE (t) = build_int_cst (t, precision);
2254 TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
2255 TYPE_PRECISION (t) = precision;
2256
2257 set_min_and_max_values_for_integral_type (t, precision, true);
2258
2259 sizetype = t;
2260 bitsizetype = build_distinct_type_copy (t);
2261 }
2262
/* Make sizetype a version of TYPE, and initialize *sizetype accordingly.
   We do this by overwriting the stub sizetype and bitsizetype nodes created
   by initialize_sizetypes.  This makes sure that (a) anything stubby about
   them no longer exists and (b) any INTEGER_CSTs created with such a type,
   remain valid.  */

void
set_sizetype (tree type)
{
  tree t, max;
  int oprecision = TYPE_PRECISION (type);
  /* The *bitsizetype types use a precision that avoids overflows when
     calculating signed sizes / offsets in bits.  However, when
     cross-compiling from a 32 bit to a 64 bit host, we are limited to 64 bit
     precision.  */
  int precision
    = MIN (oprecision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE);
  precision
    = GET_MODE_PRECISION (smallest_mode_for_size (precision, MODE_INT));
  /* Constants wider than two HOST_WIDE_INTs cannot be represented.  */
  if (precision > HOST_BITS_PER_WIDE_INT * 2)
    precision = HOST_BITS_PER_WIDE_INT * 2;

  /* sizetype must be an unsigned type.  */
  gcc_assert (TYPE_UNSIGNED (type));

  t = build_distinct_type_copy (type);
  /* We want to use sizetype's cache, as we will be replacing that type.  */
  TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
  TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
  TREE_TYPE (TYPE_CACHED_VALUES (t)) = type;
  /* Keep the stub's UID so existing references remain consistent.  */
  TYPE_UID (t) = TYPE_UID (sizetype);
  TYPE_IS_SIZETYPE (t) = 1;

  /* Replace our original stub sizetype.  The raw memcpy overwrites the
     stub node in place, so every pointer to the old sizetype now sees
     the new contents.  */
  memcpy (sizetype, t, tree_size (sizetype));
  TYPE_MAIN_VARIANT (sizetype) = sizetype;
  TYPE_CANONICAL (sizetype) = sizetype;

  /* sizetype is unsigned but we need to fix TYPE_MAX_VALUE so that it is
     sign-extended in a way consistent with force_fit_type.  */
  max = TYPE_MAX_VALUE (sizetype);
  TYPE_MAX_VALUE (sizetype)
    = double_int_to_tree (sizetype, tree_to_double_int (max));

  t = make_node (INTEGER_TYPE);
  TYPE_NAME (t) = get_identifier ("bit_size_type");
  /* We want to use bitsizetype's cache, as we will be replacing that type.  */
  TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype);
  TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype);
  TYPE_PRECISION (t) = precision;
  TYPE_UID (t) = TYPE_UID (bitsizetype);
  TYPE_IS_SIZETYPE (t) = 1;

  /* Replace our original stub bitsizetype, same in-place trick.  */
  memcpy (bitsizetype, t, tree_size (bitsizetype));
  TYPE_MAIN_VARIANT (bitsizetype) = bitsizetype;
  TYPE_CANONICAL (bitsizetype) = bitsizetype;

  fixup_unsigned_type (bitsizetype);

  /* Create the signed variants of *sizetype.  */
  ssizetype = make_signed_type (oprecision);
  TYPE_IS_SIZETYPE (ssizetype) = 1;
  sbitsizetype = make_signed_type (precision);
  TYPE_IS_SIZETYPE (sbitsizetype) = 1;
}
2329 \f
2330 /* TYPE is an integral type, i.e., an INTEGRAL_TYPE, ENUMERAL_TYPE
2331 or BOOLEAN_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
2332 for TYPE, based on the PRECISION and whether or not the TYPE
2333 IS_UNSIGNED. PRECISION need not correspond to a width supported
2334 natively by the hardware; for example, on a machine with 8-bit,
2335 16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
2336 61. */
2337
2338 void
2339 set_min_and_max_values_for_integral_type (tree type,
2340 int precision,
2341 bool is_unsigned)
2342 {
2343 tree min_value;
2344 tree max_value;
2345
2346 if (is_unsigned)
2347 {
2348 min_value = build_int_cst (type, 0);
2349 max_value
2350 = build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0
2351 ? -1
2352 : ((HOST_WIDE_INT) 1 << precision) - 1,
2353 precision - HOST_BITS_PER_WIDE_INT > 0
2354 ? ((unsigned HOST_WIDE_INT) ~0
2355 >> (HOST_BITS_PER_WIDE_INT
2356 - (precision - HOST_BITS_PER_WIDE_INT)))
2357 : 0);
2358 }
2359 else
2360 {
2361 min_value
2362 = build_int_cst_wide (type,
2363 (precision - HOST_BITS_PER_WIDE_INT > 0
2364 ? 0
2365 : (HOST_WIDE_INT) (-1) << (precision - 1)),
2366 (((HOST_WIDE_INT) (-1)
2367 << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
2368 ? precision - HOST_BITS_PER_WIDE_INT - 1
2369 : 0))));
2370 max_value
2371 = build_int_cst_wide (type,
2372 (precision - HOST_BITS_PER_WIDE_INT > 0
2373 ? -1
2374 : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
2375 (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
2376 ? (((HOST_WIDE_INT) 1
2377 << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
2378 : 0));
2379 }
2380
2381 TYPE_MIN_VALUE (type) = min_value;
2382 TYPE_MAX_VALUE (type) = max_value;
2383 }
2384
2385 /* Set the extreme values of TYPE based on its precision in bits,
2386 then lay it out. Used when make_signed_type won't do
2387 because the tree code is not INTEGER_TYPE.
2388 E.g. for Pascal, when the -fsigned-char option is given. */
2389
2390 void
2391 fixup_signed_type (tree type)
2392 {
2393 int precision = TYPE_PRECISION (type);
2394
2395 /* We can not represent properly constants greater then
2396 2 * HOST_BITS_PER_WIDE_INT, still we need the types
2397 as they are used by i386 vector extensions and friends. */
2398 if (precision > HOST_BITS_PER_WIDE_INT * 2)
2399 precision = HOST_BITS_PER_WIDE_INT * 2;
2400
2401 set_min_and_max_values_for_integral_type (type, precision,
2402 /*is_unsigned=*/false);
2403
2404 /* Lay out the type: set its alignment, size, etc. */
2405 layout_type (type);
2406 }
2407
2408 /* Set the extreme values of TYPE based on its precision in bits,
2409 then lay it out. This is used both in `make_unsigned_type'
2410 and for enumeral types. */
2411
2412 void
2413 fixup_unsigned_type (tree type)
2414 {
2415 int precision = TYPE_PRECISION (type);
2416
2417 /* We can not represent properly constants greater then
2418 2 * HOST_BITS_PER_WIDE_INT, still we need the types
2419 as they are used by i386 vector extensions and friends. */
2420 if (precision > HOST_BITS_PER_WIDE_INT * 2)
2421 precision = HOST_BITS_PER_WIDE_INT * 2;
2422
2423 TYPE_UNSIGNED (type) = 1;
2424
2425 set_min_and_max_values_for_integral_type (type, precision,
2426 /*is_unsigned=*/true);
2427
2428 /* Lay out the type: set its alignment, size, etc. */
2429 layout_type (type);
2430 }
2431 \f
2432 /* Find the best machine mode to use when referencing a bit field of length
2433 BITSIZE bits starting at BITPOS.
2434
2435 The underlying object is known to be aligned to a boundary of ALIGN bits.
2436 If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
2437 larger than LARGEST_MODE (usually SImode).
2438
2439 If no mode meets all these conditions, we return VOIDmode.
2440
2441 If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
2442 smallest mode meeting these conditions.
2443
2444 If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
2445 largest mode (but a mode no wider than UNITS_PER_WORD) that meets
2446 all the conditions.
2447
2448 If VOLATILEP is true the narrow_volatile_bitfields target hook is used to
2449 decide which of the above modes should be used. */
2450
2451 enum machine_mode
2452 get_best_mode (int bitsize, int bitpos, unsigned int align,
2453 enum machine_mode largest_mode, int volatilep)
2454 {
2455 enum machine_mode mode;
2456 unsigned int unit = 0;
2457
2458 /* Find the narrowest integer mode that contains the bit field. */
2459 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
2460 mode = GET_MODE_WIDER_MODE (mode))
2461 {
2462 unit = GET_MODE_BITSIZE (mode);
2463 if ((bitpos % unit) + bitsize <= unit)
2464 break;
2465 }
2466
2467 if (mode == VOIDmode
2468 /* It is tempting to omit the following line
2469 if STRICT_ALIGNMENT is true.
2470 But that is incorrect, since if the bitfield uses part of 3 bytes
2471 and we use a 4-byte mode, we could get a spurious segv
2472 if the extra 4th byte is past the end of memory.
2473 (Though at least one Unix compiler ignores this problem:
2474 that on the Sequent 386 machine. */
2475 || MIN (unit, BIGGEST_ALIGNMENT) > align
2476 || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode)))
2477 return VOIDmode;
2478
2479 if ((SLOW_BYTE_ACCESS && ! volatilep)
2480 || (volatilep && !targetm.narrow_volatile_bitfield ()))
2481 {
2482 enum machine_mode wide_mode = VOIDmode, tmode;
2483
2484 for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode;
2485 tmode = GET_MODE_WIDER_MODE (tmode))
2486 {
2487 unit = GET_MODE_BITSIZE (tmode);
2488 if (bitpos / unit == (bitpos + bitsize - 1) / unit
2489 && unit <= BITS_PER_WORD
2490 && unit <= MIN (align, BIGGEST_ALIGNMENT)
2491 && (largest_mode == VOIDmode
2492 || unit <= GET_MODE_BITSIZE (largest_mode)))
2493 wide_mode = tmode;
2494 }
2495
2496 if (wide_mode != VOIDmode)
2497 return wide_mode;
2498 }
2499
2500 return mode;
2501 }
2502
2503 /* Gets minimal and maximal values for MODE (signed or unsigned depending on
2504 SIGN). The returned constants are made to be usable in TARGET_MODE. */
2505
2506 void
2507 get_mode_bounds (enum machine_mode mode, int sign,
2508 enum machine_mode target_mode,
2509 rtx *mmin, rtx *mmax)
2510 {
2511 unsigned size = GET_MODE_BITSIZE (mode);
2512 unsigned HOST_WIDE_INT min_val, max_val;
2513
2514 gcc_assert (size <= HOST_BITS_PER_WIDE_INT);
2515
2516 if (sign)
2517 {
2518 min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1));
2519 max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1;
2520 }
2521 else
2522 {
2523 min_val = 0;
2524 max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1;
2525 }
2526
2527 *mmin = gen_int_mode (min_val, target_mode);
2528 *mmax = gen_int_mode (max_val, target_mode);
2529 }
2530
2531 #include "gt-stor-layout.h"