/* C-compiler utilities for types and variables storage layout
   Copyright (C) 1987-2021 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "regs.h"
#include "emit-rtl.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "print-tree.h"
#include "langhooks.h"
#include "tree-inline.h"
#include "dumpfile.h"
#include "gimplify.h"
#include "attribs.h"
#include "debug.h"
#include "calls.h"

/* Data type for the expressions representing sizes of data types.
   It is the first integer type laid out.  */
tree sizetype_tab[(int) stk_type_kind_last];

/* If nonzero, this is an upper limit on alignment of structure fields.
   The value is measured in bits.  */
unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;

static tree self_referential_size (tree);
static void finalize_record_size (record_layout_info);
static void finalize_type_size (tree);
static void place_union_field (record_layout_info, tree);
static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
                             HOST_WIDE_INT, tree);
extern void debug_rli (record_layout_info);
\f
/* Given a size SIZE that may not be a constant, return a SAVE_EXPR
   to serve as the actual size-expression for a type or decl.  */

tree
variable_size (tree size)
{
  /* Obviously.  */
  if (TREE_CONSTANT (size))
    return size;

  /* If the size is self-referential, we can't make a SAVE_EXPR (see
     save_expr for the rationale).  But we can do something else.  */
  if (CONTAINS_PLACEHOLDER_P (size))
    return self_referential_size (size);

  /* If we are in the global binding level, we can't make a SAVE_EXPR
     since it may end up being shared across functions, so it is up
     to the front-end to deal with this case.  */
  if (lang_hooks.decls.global_bindings_p ())
    return size;

  return save_expr (size);
}
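
/* For example, for a C99 variable-length array member whose size is
   computed as `n * sizeof (int)', the front end can do (a minimal,
   illustrative sketch; N stands for some saved tree):

     tree bytes = size_binop (MULT_EXPR, n, size_int (4));
     tree size = variable_size (bytes);

   after which SIZE is a SAVE_EXPR, unless BYTES was constant,
   self-referential, or built at global binding level.  */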

/* An array of functions used for self-referential size computation.  */
static GTY(()) vec<tree, va_gc> *size_functions;

/* Return true if T is a self-referential component reference.  */

static bool
self_referential_component_ref_p (tree t)
{
  if (TREE_CODE (t) != COMPONENT_REF)
    return false;

  while (REFERENCE_CLASS_P (t))
    t = TREE_OPERAND (t, 0);

  return (TREE_CODE (t) == PLACEHOLDER_EXPR);
}

/* Similar to copy_tree_r but do not copy component references involving
   PLACEHOLDER_EXPRs.  These nodes are spotted in find_placeholder_in_expr
   and substituted in substitute_in_expr.  */

static tree
copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
{
  enum tree_code code = TREE_CODE (*tp);

  /* Stop at types, decls, constants like copy_tree_r.  */
  if (TREE_CODE_CLASS (code) == tcc_type
      || TREE_CODE_CLASS (code) == tcc_declaration
      || TREE_CODE_CLASS (code) == tcc_constant)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* This is the pattern built in ada/make_aligning_type.  */
  else if (code == ADDR_EXPR
           && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* Default case: the component reference.  */
  else if (self_referential_component_ref_p (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  /* We're not supposed to have them in self-referential size trees
     because we wouldn't properly control when they are evaluated.
     However, not creating superfluous SAVE_EXPRs requires accurate
     tracking of readonly-ness all the way down to here, which we
     cannot always guarantee in practice.  So punt in this case.  */
  else if (code == SAVE_EXPR)
    return error_mark_node;

  else if (code == STATEMENT_LIST)
    gcc_unreachable ();

  return copy_tree_r (tp, walk_subtrees, data);
}

/* Given a SIZE expression that is self-referential, return an equivalent
   expression to serve as the actual size expression for a type.  */

static tree
self_referential_size (tree size)
{
  static unsigned HOST_WIDE_INT fnno = 0;
  vec<tree> self_refs = vNULL;
  tree param_type_list = NULL, param_decl_list = NULL;
  tree t, ref, return_type, fntype, fnname, fndecl;
  unsigned int i;
  char buf[128];
  vec<tree, va_gc> *args = NULL;

  /* Do not factor out simple operations.  */
  t = skip_simple_constant_arithmetic (size);
  if (TREE_CODE (t) == CALL_EXPR || self_referential_component_ref_p (t))
    return size;

  /* Collect the list of self-references in the expression.  */
  find_placeholder_in_expr (size, &self_refs);
  gcc_assert (self_refs.length () > 0);

  /* Obtain a private copy of the expression.  */
  t = size;
  if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
    return size;
  size = t;

  /* Build the parameter and argument lists in parallel; also
     substitute the former for the latter in the expression.  */
  vec_alloc (args, self_refs.length ());
  FOR_EACH_VEC_ELT (self_refs, i, ref)
    {
      tree subst, param_name, param_type, param_decl;

      if (DECL_P (ref))
        {
          /* We shouldn't have true variables here.  */
          gcc_assert (TREE_READONLY (ref));
          subst = ref;
        }
      /* This is the pattern built in ada/make_aligning_type.  */
      else if (TREE_CODE (ref) == ADDR_EXPR)
        subst = ref;
      /* Default case: the component reference.  */
      else
        subst = TREE_OPERAND (ref, 1);

      sprintf (buf, "p%d", i);
      param_name = get_identifier (buf);
      param_type = TREE_TYPE (ref);
      param_decl
        = build_decl (input_location, PARM_DECL, param_name, param_type);
      DECL_ARG_TYPE (param_decl) = param_type;
      DECL_ARTIFICIAL (param_decl) = 1;
      TREE_READONLY (param_decl) = 1;

      size = substitute_in_expr (size, subst, param_decl);

      param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
      param_decl_list = chainon (param_decl, param_decl_list);
      args->quick_push (ref);
    }

  self_refs.release ();

  /* Append 'void' to indicate that the number of parameters is fixed.  */
  param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);

  /* The 3 lists have been created in reverse order.  */
  param_type_list = nreverse (param_type_list);
  param_decl_list = nreverse (param_decl_list);

  /* Build the function type.  */
  return_type = TREE_TYPE (size);
  fntype = build_function_type (return_type, param_type_list);

  /* Build the function declaration.  */
  sprintf (buf, "SZ" HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
  fnname = get_file_function_name (buf);
  fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
  for (t = param_decl_list; t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = fndecl;
  DECL_ARGUMENTS (fndecl) = param_decl_list;
  DECL_RESULT (fndecl)
    = build_decl (input_location, RESULT_DECL, 0, return_type);
  DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;

  /* The function has been created by the compiler and we don't
     want to emit debug info for it.  */
  DECL_ARTIFICIAL (fndecl) = 1;
  DECL_IGNORED_P (fndecl) = 1;

  /* It is supposed to be "const" and never throw.  */
  TREE_READONLY (fndecl) = 1;
  TREE_NOTHROW (fndecl) = 1;

  /* We want it to be inlined when this is deemed profitable, as
     well as discarded if every call has been integrated.  */
  DECL_DECLARED_INLINE_P (fndecl) = 1;

  /* It is made up of a unique return statement.  */
  DECL_INITIAL (fndecl) = make_node (BLOCK);
  BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
  t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
  DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
  TREE_STATIC (fndecl) = 1;

  /* Put it onto the list of size functions.  */
  vec_safe_push (size_functions, fndecl);

  /* Replace the original expression with a call to the size function.  */
  return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
}

/* Take, queue and compile all the size functions.  It is essential that
   the size functions be gimplified at the very end of the compilation
   in order to guarantee transparent handling of self-referential sizes.
   Otherwise the GENERIC inliner would not be able to inline them back
   at each of their call sites, thus creating artificial non-constant
   size expressions which would trigger nasty problems later on.  */

void
finalize_size_functions (void)
{
  unsigned int i;
  tree fndecl;

  for (i = 0; size_functions && size_functions->iterate (i, &fndecl); i++)
    {
      allocate_struct_function (fndecl, false);
      set_cfun (NULL);
      dump_function (TDI_original, fndecl);

      /* As these functions are used to describe the layout of variable-length
         structures, debug info generation needs their implementation.  */
      debug_hooks->size_function (fndecl);
      gimplify_function_tree (fndecl);
      cgraph_node::finalize_function (fndecl, false);
    }

  vec_free (size_functions);
}
\f
/* Return a machine mode of class MCLASS with SIZE bits of precision,
   if one exists.  The mode may have padding bits as well as the SIZE
   value bits.  If LIMIT is nonzero, disregard modes wider than
   MAX_FIXED_MODE_SIZE.  */

opt_machine_mode
mode_for_size (poly_uint64 size, enum mode_class mclass, int limit)
{
  machine_mode mode;
  int i;

  if (limit && maybe_gt (size, (unsigned int) MAX_FIXED_MODE_SIZE))
    return opt_machine_mode ();

  /* Get the first mode which has this size, in the specified class.  */
  FOR_EACH_MODE_IN_CLASS (mode, mclass)
    if (known_eq (GET_MODE_PRECISION (mode), size))
      return mode;

  if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
    for (i = 0; i < NUM_INT_N_ENTS; i ++)
      if (known_eq (int_n_data[i].bitsize, size)
          && int_n_enabled_p[i])
        return int_n_data[i].m;

  return opt_machine_mode ();
}
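
/* For example, on a typical target where SImode is the 32-bit integer
   mode (illustrative only):

     mode_for_size (32, MODE_INT, 0)
       -> SImode
     mode_for_size (128, MODE_INT, 1)
       -> the empty opt_machine_mode when MAX_FIXED_MODE_SIZE < 128.  */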

/* Similar, except passed a tree node.  */

opt_machine_mode
mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
{
  unsigned HOST_WIDE_INT uhwi;
  unsigned int ui;

  if (!tree_fits_uhwi_p (size))
    return opt_machine_mode ();
  uhwi = tree_to_uhwi (size);
  ui = uhwi;
  if (uhwi != ui)
    return opt_machine_mode ();
  return mode_for_size (ui, mclass, limit);
}

/* Return the narrowest mode of class MCLASS that contains at least
   SIZE bits.  Abort if no such mode exists.  */

machine_mode
smallest_mode_for_size (poly_uint64 size, enum mode_class mclass)
{
  machine_mode mode = VOIDmode;
  int i;

  /* Get the first mode which has at least this size, in the
     specified class.  */
  FOR_EACH_MODE_IN_CLASS (mode, mclass)
    if (known_ge (GET_MODE_PRECISION (mode), size))
      break;

  gcc_assert (mode != VOIDmode);

  if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
    for (i = 0; i < NUM_INT_N_ENTS; i ++)
      if (known_ge (int_n_data[i].bitsize, size)
          && known_lt (int_n_data[i].bitsize, GET_MODE_PRECISION (mode))
          && int_n_enabled_p[i])
        mode = int_n_data[i].m;

  return mode;
}
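
/* For example, on a typical target with 8-bit QImode, 16-bit HImode
   and 32-bit SImode (illustrative only):

     smallest_mode_for_size (1, MODE_INT)   -> QImode
     smallest_mode_for_size (17, MODE_INT)  -> SImode  */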

/* Return an integer mode of exactly the same size as MODE, if one exists.  */

opt_scalar_int_mode
int_mode_for_mode (machine_mode mode)
{
  switch (GET_MODE_CLASS (mode))
    {
    case MODE_INT:
    case MODE_PARTIAL_INT:
      return as_a <scalar_int_mode> (mode);

    case MODE_COMPLEX_INT:
    case MODE_COMPLEX_FLOAT:
    case MODE_FLOAT:
    case MODE_DECIMAL_FLOAT:
    case MODE_FRACT:
    case MODE_ACCUM:
    case MODE_UFRACT:
    case MODE_UACCUM:
    case MODE_VECTOR_BOOL:
    case MODE_VECTOR_INT:
    case MODE_VECTOR_FLOAT:
    case MODE_VECTOR_FRACT:
    case MODE_VECTOR_ACCUM:
    case MODE_VECTOR_UFRACT:
    case MODE_VECTOR_UACCUM:
      return int_mode_for_size (GET_MODE_BITSIZE (mode), 0);

    case MODE_OPAQUE:
      return opt_scalar_int_mode ();

    case MODE_RANDOM:
      if (mode == BLKmode)
        return opt_scalar_int_mode ();

      /* fall through */

    case MODE_CC:
    default:
      gcc_unreachable ();
    }
}

/* Find a mode that can be used for efficient bitwise operations on MODE,
   if one exists.  */

opt_machine_mode
bitwise_mode_for_mode (machine_mode mode)
{
  /* Quick exit if we already have a suitable mode.  */
  scalar_int_mode int_mode;
  if (is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_BITSIZE (int_mode) <= MAX_FIXED_MODE_SIZE)
    return int_mode;

  /* Reuse the sanity checks from int_mode_for_mode.  */
  gcc_checking_assert ((int_mode_for_mode (mode), true));

  poly_int64 bitsize = GET_MODE_BITSIZE (mode);

  /* Try to replace complex modes with complex modes.  In general we
     expect both components to be processed independently, so we only
     care whether there is a register for the inner mode.  */
  if (COMPLEX_MODE_P (mode))
    {
      machine_mode trial = mode;
      if ((GET_MODE_CLASS (trial) == MODE_COMPLEX_INT
           || mode_for_size (bitsize, MODE_COMPLEX_INT, false).exists (&trial))
          && have_regs_of_mode[GET_MODE_INNER (trial)])
        return trial;
    }

  /* Try to replace vector modes with vector modes.  Also try using vector
     modes if an integer mode would be too big.  */
  if (VECTOR_MODE_P (mode)
      || maybe_gt (bitsize, MAX_FIXED_MODE_SIZE))
    {
      machine_mode trial = mode;
      if ((GET_MODE_CLASS (trial) == MODE_VECTOR_INT
           || mode_for_size (bitsize, MODE_VECTOR_INT, 0).exists (&trial))
          && have_regs_of_mode[trial]
          && targetm.vector_mode_supported_p (trial))
        return trial;
    }

  /* Otherwise fall back on integers while honoring MAX_FIXED_MODE_SIZE.  */
  return mode_for_size (bitsize, MODE_INT, true);
}

/* Find a type that can be used for efficient bitwise operations on MODE.
   Return null if no such mode exists.  */

tree
bitwise_type_for_mode (machine_mode mode)
{
  if (!bitwise_mode_for_mode (mode).exists (&mode))
    return NULL_TREE;

  unsigned int inner_size = GET_MODE_UNIT_BITSIZE (mode);
  tree inner_type = build_nonstandard_integer_type (inner_size, true);

  if (VECTOR_MODE_P (mode))
    return build_vector_type_for_mode (inner_type, mode);

  if (COMPLEX_MODE_P (mode))
    return build_complex_type (inner_type);

  gcc_checking_assert (GET_MODE_INNER (mode) == mode);
  return inner_type;
}
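
/* For example, on a typical target (illustrative only):
   bitwise_type_for_mode (SFmode) yields an unsigned 32-bit integer
   type, and bitwise_type_for_mode (V4SFmode) yields a vector type of
   four such integers, provided bitwise_mode_for_mode mapped V4SFmode
   to a supported V4SImode.  */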

/* Find a mode that is suitable for representing a vector with NUNITS
   elements of mode INNERMODE, if one exists.  The returned mode can be
   either an integer mode or a vector mode.  */

opt_machine_mode
mode_for_vector (scalar_mode innermode, poly_uint64 nunits)
{
  machine_mode mode;

  /* First, look for a supported vector type.  */
  if (SCALAR_FLOAT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FLOAT;
  else if (SCALAR_FRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_FRACT;
  else if (SCALAR_UFRACT_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UFRACT;
  else if (SCALAR_ACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_ACCUM;
  else if (SCALAR_UACCUM_MODE_P (innermode))
    mode = MIN_MODE_VECTOR_UACCUM;
  else
    mode = MIN_MODE_VECTOR_INT;

  /* Do not check vector_mode_supported_p here.  We'll do that
     later in vector_type_mode.  */
  FOR_EACH_MODE_FROM (mode, mode)
    if (known_eq (GET_MODE_NUNITS (mode), nunits)
        && GET_MODE_INNER (mode) == innermode)
      return mode;

  /* For integers, try mapping it to a same-sized scalar mode.  */
  if (GET_MODE_CLASS (innermode) == MODE_INT)
    {
      poly_uint64 nbits = nunits * GET_MODE_BITSIZE (innermode);
      if (int_mode_for_size (nbits, 0).exists (&mode)
          && have_regs_of_mode[mode])
        return mode;
    }

  return opt_machine_mode ();
}

/* If a piece of code is using vector mode VECTOR_MODE and also wants
   to operate on elements of mode ELEMENT_MODE, return the vector mode
   it should use for those elements.  If NUNITS is nonzero, ensure that
   the mode has exactly NUNITS elements, otherwise pick whichever vector
   size pairs the most naturally with VECTOR_MODE; this may mean choosing
   a mode with a different size and/or number of elements, depending on
   what the target prefers.  Return an empty opt_machine_mode if there
   is no supported vector mode with the required properties.

   Unlike mode_for_vector, any returned mode is guaranteed to satisfy
   both VECTOR_MODE_P and targetm.vector_mode_supported_p.  */

opt_machine_mode
related_vector_mode (machine_mode vector_mode, scalar_mode element_mode,
                     poly_uint64 nunits)
{
  gcc_assert (VECTOR_MODE_P (vector_mode));
  return targetm.vectorize.related_mode (vector_mode, element_mode, nunits);
}

/* If a piece of code is using vector mode VECTOR_MODE and also wants
   to operate on integer vectors with the same element size and number
   of elements, return the vector mode it should use.  Return an empty
   opt_machine_mode if there is no supported vector mode with the
   required properties.

   Unlike mode_for_vector, any returned mode is guaranteed to satisfy
   both VECTOR_MODE_P and targetm.vector_mode_supported_p.  */

opt_machine_mode
related_int_vector_mode (machine_mode vector_mode)
{
  gcc_assert (VECTOR_MODE_P (vector_mode));
  scalar_int_mode int_mode;
  if (int_mode_for_mode (GET_MODE_INNER (vector_mode)).exists (&int_mode))
    return related_vector_mode (vector_mode, int_mode,
                                GET_MODE_NUNITS (vector_mode));
  return opt_machine_mode ();
}
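
/* For example, on a target where both V4SFmode and V4SImode are
   supported (illustrative only):

     related_int_vector_mode (V4SFmode) -> V4SImode

   since SImode is the integer mode with the same size as SFmode.  */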

/* Return the alignment of MODE.  This will be bounded by 1 and
   BIGGEST_ALIGNMENT.  */

unsigned int
get_mode_alignment (machine_mode mode)
{
  return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
}

/* Return the natural mode of an array, given that it is SIZE bytes in
   total and has elements of type ELEM_TYPE.  */

static machine_mode
mode_for_array (tree elem_type, tree size)
{
  tree elem_size;
  poly_uint64 int_size, int_elem_size;
  unsigned HOST_WIDE_INT num_elems;
  bool limit_p;

  /* One-element arrays get the component type's mode.  */
  elem_size = TYPE_SIZE (elem_type);
  if (simple_cst_equal (size, elem_size))
    return TYPE_MODE (elem_type);

  limit_p = true;
  if (poly_int_tree_p (size, &int_size)
      && poly_int_tree_p (elem_size, &int_elem_size)
      && maybe_ne (int_elem_size, 0U)
      && constant_multiple_p (int_size, int_elem_size, &num_elems))
    {
      machine_mode elem_mode = TYPE_MODE (elem_type);
      machine_mode mode;
      if (targetm.array_mode (elem_mode, num_elems).exists (&mode))
        return mode;
      if (targetm.array_mode_supported_p (elem_mode, num_elems))
        limit_p = false;
    }
  return mode_for_size_tree (size, MODE_INT, limit_p).else_blk ();
}
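
/* For example, for a 16-byte array of 4-byte integer elements
   (illustrative only): if the target defines neither a special array
   mode nor array_mode_supported_p for this case, the result is the
   16-byte integer mode if one exists within MAX_FIXED_MODE_SIZE, and
   BLKmode otherwise.  */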
\f
/* Subroutine of layout_decl: Force alignment required for the data type.
   But if the decl itself wants greater alignment, don't override that.  */

static inline void
do_type_align (tree type, tree decl)
{
  if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
    {
      SET_DECL_ALIGN (decl, TYPE_ALIGN (type));
      if (TREE_CODE (decl) == FIELD_DECL)
        DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
    }
  if (TYPE_WARN_IF_NOT_ALIGN (type) > DECL_WARN_IF_NOT_ALIGN (decl))
    SET_DECL_WARN_IF_NOT_ALIGN (decl, TYPE_WARN_IF_NOT_ALIGN (type));
}

/* Set the size, mode and alignment of a ..._DECL node.
   TYPE_DECL does need this for C++.
   Note that LABEL_DECL and CONST_DECL nodes do not need this,
   and FUNCTION_DECL nodes have them set up in a special (and simple) way.
   Don't call layout_decl for them.

   KNOWN_ALIGN is the amount of alignment we can assume this
   decl has with no special effort.  It is relevant only for FIELD_DECLs
   and depends on the previous fields.
   All that matters about KNOWN_ALIGN is which powers of 2 divide it.
   If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
   the record will be aligned to suit.  */

void
layout_decl (tree decl, unsigned int known_align)
{
  tree type = TREE_TYPE (decl);
  enum tree_code code = TREE_CODE (decl);
  rtx rtl = NULL_RTX;
  location_t loc = DECL_SOURCE_LOCATION (decl);

  if (code == CONST_DECL)
    return;

  gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
              || code == TYPE_DECL || code == FIELD_DECL);

  rtl = DECL_RTL_IF_SET (decl);

  if (type == error_mark_node)
    type = void_type_node;

  /* Usually the size and mode come from the data type without change,
     however, the front-end may set the explicit width of the field, so its
     size may not be the same as the size of its type.  This happens with
     bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
     also happens with other fields.  For example, the C++ front-end creates
     zero-sized fields corresponding to empty base classes, and depends on
     layout_type setting DECL_FIELD_BITPOS correctly for the field.  Set the
     size in bytes from the size in bits.  If we have already set the mode,
     don't set it again since we can be called twice for FIELD_DECLs.  */

  DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
  if (DECL_MODE (decl) == VOIDmode)
    SET_DECL_MODE (decl, TYPE_MODE (type));

  if (DECL_SIZE (decl) == 0)
    {
      DECL_SIZE (decl) = TYPE_SIZE (type);
      DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
    }
  else if (DECL_SIZE_UNIT (decl) == 0)
    DECL_SIZE_UNIT (decl)
      = fold_convert_loc (loc, sizetype,
                          size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
                                          bitsize_unit_node));

  if (code != FIELD_DECL)
    /* For non-fields, update the alignment from the type.  */
    do_type_align (type, decl);
  else
    /* For fields, it's a bit more complicated...  */
    {
      bool old_user_align = DECL_USER_ALIGN (decl);
      bool zero_bitfield = false;
      bool packed_p = DECL_PACKED (decl);
      unsigned int mfa;

      if (DECL_BIT_FIELD (decl))
        {
          DECL_BIT_FIELD_TYPE (decl) = type;

          /* A zero-length bit-field affects the alignment of the next
             field.  In essence such bit-fields are not influenced by
             any packing due to #pragma pack or attribute packed.  */
          if (integer_zerop (DECL_SIZE (decl))
              && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
            {
              zero_bitfield = true;
              packed_p = false;
              if (PCC_BITFIELD_TYPE_MATTERS)
                do_type_align (type, decl);
              else
                {
#ifdef EMPTY_FIELD_BOUNDARY
                  if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
                    {
                      SET_DECL_ALIGN (decl, EMPTY_FIELD_BOUNDARY);
                      DECL_USER_ALIGN (decl) = 0;
                    }
#endif
                }
            }

          /* See if we can use an ordinary integer mode for a bit-field.
             Conditions are: a fixed size that is correct for another mode,
             occupying a complete byte or bytes on proper boundary.  */
          if (TYPE_SIZE (type) != 0
              && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
              && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
            {
              machine_mode xmode;
              if (mode_for_size_tree (DECL_SIZE (decl),
                                      MODE_INT, 1).exists (&xmode))
                {
                  unsigned int xalign = GET_MODE_ALIGNMENT (xmode);
                  if (!(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
                      && (known_align == 0 || known_align >= xalign))
                    {
                      SET_DECL_ALIGN (decl, MAX (xalign, DECL_ALIGN (decl)));
                      SET_DECL_MODE (decl, xmode);
                      DECL_BIT_FIELD (decl) = 0;
                    }
                }
            }

          /* Turn off DECL_BIT_FIELD if we won't need it set.  */
          if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
              && known_align >= TYPE_ALIGN (type)
              && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
            DECL_BIT_FIELD (decl) = 0;
        }
      else if (packed_p && DECL_USER_ALIGN (decl))
        /* Don't touch DECL_ALIGN.  For other packed fields, go ahead and
           round up; we'll reduce it again below.  We want packing to
           supersede USER_ALIGN inherited from the type, but defer to
           alignment explicitly specified on the field decl.  */;
      else
        do_type_align (type, decl);

      /* If the field is packed and not explicitly aligned, give it the
         minimum alignment.  Note that do_type_align may set
         DECL_USER_ALIGN, so we need to check old_user_align instead.  */
      if (packed_p
          && !old_user_align)
        SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), BITS_PER_UNIT));

      if (! packed_p && ! DECL_USER_ALIGN (decl))
        {
          /* Some targets (e.g. i386, VMS) limit struct field alignment
             to a lower boundary than alignment of variables unless
             it was overridden by attribute aligned.  */
#ifdef BIGGEST_FIELD_ALIGNMENT
          SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl),
                                     (unsigned) BIGGEST_FIELD_ALIGNMENT));
#endif
#ifdef ADJUST_FIELD_ALIGN
          SET_DECL_ALIGN (decl, ADJUST_FIELD_ALIGN (decl, TREE_TYPE (decl),
                                                    DECL_ALIGN (decl)));
#endif
        }

      if (zero_bitfield)
        mfa = initial_max_fld_align * BITS_PER_UNIT;
      else
        mfa = maximum_field_alignment;
      /* Should this be controlled by DECL_USER_ALIGN, too?  */
      if (mfa != 0)
        SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), mfa));
    }

  /* Evaluate nonconstant size only once, either now or as soon as safe.  */
  if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
    DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
  if (DECL_SIZE_UNIT (decl) != 0
      && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
    DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));

  /* If requested, warn about definitions of large data objects.  */
  if ((code == PARM_DECL || (code == VAR_DECL && !DECL_NONLOCAL_FRAME (decl)))
      && !DECL_EXTERNAL (decl))
    {
      tree size = DECL_SIZE_UNIT (decl);

      if (size != 0 && TREE_CODE (size) == INTEGER_CST)
        {
          /* -Wlarger-than= argument of HOST_WIDE_INT_MAX is treated
             as if PTRDIFF_MAX had been specified, with the value
             being that on the target rather than the host.  */
          unsigned HOST_WIDE_INT max_size = warn_larger_than_size;
          if (max_size == HOST_WIDE_INT_MAX)
            max_size = tree_to_shwi (TYPE_MAX_VALUE (ptrdiff_type_node));

          if (compare_tree_int (size, max_size) > 0)
            warning (OPT_Wlarger_than_, "size of %q+D %E bytes exceeds "
                     "maximum object size %wu",
                     decl, size, max_size);
        }
    }

  /* If the RTL was already set, update its mode and mem attributes.  */
  if (rtl)
    {
      PUT_MODE (rtl, DECL_MODE (decl));
      SET_DECL_RTL (decl, 0);
      if (MEM_P (rtl))
        set_mem_attributes (rtl, decl, 1);
      SET_DECL_RTL (decl, rtl);
    }
}

/* Given a VAR_DECL, PARM_DECL, RESULT_DECL, or FIELD_DECL, clears the
   results of a previous call to layout_decl and calls it again.  */

void
relayout_decl (tree decl)
{
  DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
  SET_DECL_MODE (decl, VOIDmode);
  if (!DECL_USER_ALIGN (decl))
    SET_DECL_ALIGN (decl, 0);
  if (DECL_RTL_SET_P (decl))
    SET_DECL_RTL (decl, 0);

  layout_decl (decl, 0);
}
\f
/* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
   QUAL_UNION_TYPE.  Return a pointer to a struct record_layout_info which
   is to be passed to all other layout functions for this record.  It is the
   responsibility of the caller to call `free' for the storage returned.
   Note that garbage collection is not permitted until we finish laying
   out the record.  */

record_layout_info
start_record_layout (tree t)
{
  record_layout_info rli = XNEW (struct record_layout_info_s);

  rli->t = t;

  /* If the type has a minimum specified alignment (via an attribute
     declaration, for example) use it -- otherwise, start with a
     one-byte alignment.  */
  rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
  rli->unpacked_align = rli->record_align;
  rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);

#ifdef STRUCTURE_SIZE_BOUNDARY
  /* Packed structures don't need to have minimum size.  */
  if (! TYPE_PACKED (t))
    {
      unsigned tmp;

      /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY.  */
      tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
      if (maximum_field_alignment != 0)
        tmp = MIN (tmp, maximum_field_alignment);
      rli->record_align = MAX (rli->record_align, tmp);
    }
#endif

  rli->offset = size_zero_node;
  rli->bitpos = bitsize_zero_node;
  rli->prev_field = 0;
  rli->pending_statics = 0;
  rli->packed_maybe_necessary = 0;
  rli->remaining_in_alignment = 0;

  return rli;
}
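
/* A typical caller lays out a record along these lines (a minimal
   sketch of the protocol; finish_record_layout is defined later in
   this file and frees RLI when its second argument is nonzero):

     record_layout_info rli = start_record_layout (t);
     for (tree field = TYPE_FIELDS (t); field; field = DECL_CHAIN (field))
       place_field (rli, field);
     finish_record_layout (rli, 1);
*/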

/* Fold sizetype value X to bitsizetype, given that X represents a type
   size or offset.  */

static tree
bits_from_bytes (tree x)
{
  if (POLY_INT_CST_P (x))
    /* The runtime calculation isn't allowed to overflow sizetype;
       increasing the runtime values must always increase the size
       or offset of the object.  This means that the object imposes
       a maximum value on the runtime parameters, but we don't record
       what that is.  */
    return build_poly_int_cst
      (bitsizetype,
       poly_wide_int::from (poly_int_cst_value (x),
                            TYPE_PRECISION (bitsizetype),
                            TYPE_SIGN (TREE_TYPE (x))));
  x = fold_convert (bitsizetype, x);
  gcc_checking_assert (x);
  return x;
}

/* Return the combined bit position for the byte offset OFFSET and the
   bit position BITPOS.

   These functions operate on byte and bit positions present in FIELD_DECLs
   and assume that these expressions result in no (intermediate) overflow.
   This assumption is necessary to fold the expressions as much as possible,
   so as to avoid creating artificially variable-sized types in languages
   supporting variable-sized types like Ada.  */

tree
bit_from_pos (tree offset, tree bitpos)
{
  return size_binop (PLUS_EXPR, bitpos,
                     size_binop (MULT_EXPR, bits_from_bytes (offset),
                                 bitsize_unit_node));
}
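
/* For example, with the usual BITS_PER_UNIT of 8 (illustrative only):

     bit_from_pos (size_int (4), bitsize_int (3))

   folds to the bitsizetype constant 35, i.e. 4 * 8 + 3 bits.  */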

/* Return the combined truncated byte position for the byte offset OFFSET and
   the bit position BITPOS.  */

tree
byte_from_pos (tree offset, tree bitpos)
{
  tree bytepos;
  if (TREE_CODE (bitpos) == MULT_EXPR
      && tree_int_cst_equal (TREE_OPERAND (bitpos, 1), bitsize_unit_node))
    bytepos = TREE_OPERAND (bitpos, 0);
  else
    bytepos = size_binop (TRUNC_DIV_EXPR, bitpos, bitsize_unit_node);
  return size_binop (PLUS_EXPR, offset, fold_convert (sizetype, bytepos));
}
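
/* Continuing the example above, with BITS_PER_UNIT == 8:

     byte_from_pos (size_int (4), bitsize_int (3))

   folds to the sizetype constant 4; the 3 trailing bits are
   truncated away.  */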

/* Split the bit position POS into a byte offset *POFFSET and a bit
   position *PBITPOS with the byte offset aligned to OFF_ALIGN bits.  */

void
pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
              tree pos)
{
  tree toff_align = bitsize_int (off_align);
  if (TREE_CODE (pos) == MULT_EXPR
      && tree_int_cst_equal (TREE_OPERAND (pos, 1), toff_align))
    {
      *poffset = size_binop (MULT_EXPR,
                             fold_convert (sizetype, TREE_OPERAND (pos, 0)),
                             size_int (off_align / BITS_PER_UNIT));
      *pbitpos = bitsize_zero_node;
    }
  else
    {
      *poffset = size_binop (MULT_EXPR,
                             fold_convert (sizetype,
                                           size_binop (FLOOR_DIV_EXPR, pos,
                                                       toff_align)),
                             size_int (off_align / BITS_PER_UNIT));
      *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, toff_align);
    }
}
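
/* For example, with OFF_ALIGN == 32 and POS == 69 bits (illustrative
   only): 69 = 2 * 32 + 5, so pos_from_bit stores a byte offset of
   2 * 4 = 8 and a bit position of 5.  */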

/* Given a pointer to bit and byte offsets and an offset alignment,
   normalize the offsets so they are within the alignment.  */

void
normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
{
  /* If the bit position is now larger than it should be, adjust it
     downwards.  */
  if (compare_tree_int (*pbitpos, off_align) >= 0)
    {
      tree offset, bitpos;
      pos_from_bit (&offset, &bitpos, off_align, *pbitpos);
      *poffset = size_binop (PLUS_EXPR, *poffset, offset);
      *pbitpos = bitpos;
    }
}

/* Print debugging information about the information in RLI.  */

DEBUG_FUNCTION void
debug_rli (record_layout_info rli)
{
  print_node_brief (stderr, "type", rli->t, 0);
  print_node_brief (stderr, "\noffset", rli->offset, 0);
  print_node_brief (stderr, " bitpos", rli->bitpos, 0);

  fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
           rli->record_align, rli->unpacked_align,
           rli->offset_align);

  /* The ms_struct code is the only code that uses this.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment);

  if (rli->packed_maybe_necessary)
    fprintf (stderr, "packed may be necessary\n");

  if (!vec_safe_is_empty (rli->pending_statics))
    {
      fprintf (stderr, "pending statics:\n");
      debug (rli->pending_statics);
    }
}

/* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
   BITPOS if necessary to keep BITPOS below OFFSET_ALIGN.  */

void
normalize_rli (record_layout_info rli)
{
  normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
}

/* Returns the size in bytes allocated so far.  */

tree
rli_size_unit_so_far (record_layout_info rli)
{
  return byte_from_pos (rli->offset, rli->bitpos);
}

/* Returns the size in bits allocated so far.  */

tree
rli_size_so_far (record_layout_info rli)
{
  return bit_from_pos (rli->offset, rli->bitpos);
}

/* FIELD is about to be added to RLI->T.  The alignment (in bits) of
   the next available location within the record is given by KNOWN_ALIGN.
   Update the variable alignment fields in RLI, and return the alignment
   to give the FIELD.  */

unsigned int
update_alignment_for_field (record_layout_info rli, tree field,
                            unsigned int known_align)
{
  /* The alignment required for FIELD.  */
  unsigned int desired_align;
  /* The type of this field.  */
  tree type = TREE_TYPE (field);
  /* True if the field was explicitly aligned by the user.  */
  bool user_align;
  bool is_bitfield;

  /* Do not attempt to align an ERROR_MARK node.  */
  if (TREE_CODE (type) == ERROR_MARK)
    return 0;

  /* Lay out the field so we know what alignment it needs.  */
  layout_decl (field, known_align);
  desired_align = DECL_ALIGN (field);
  user_align = DECL_USER_ALIGN (field);

  is_bitfield = (type != error_mark_node
                 && DECL_BIT_FIELD_TYPE (field)
                 && ! integer_zerop (TYPE_SIZE (type)));

  /* Record must have at least as much alignment as any field.
     Otherwise, the alignment of the field within the record is
     meaningless.  */
  if (targetm.ms_bitfield_layout_p (rli->t))
    {
      /* Here, the alignment of the underlying type of a bitfield can
         affect the alignment of a record; even a zero-sized field
         can do this.  The alignment should be to the alignment of
         the type, except that for zero-size bitfields this only
         applies if there was an immediately prior, nonzero-size
         bitfield.  (That's the way it is, experimentally.) */
      if (!is_bitfield
          || ((DECL_SIZE (field) == NULL_TREE
               || !integer_zerop (DECL_SIZE (field)))
              ? !DECL_PACKED (field)
              : (rli->prev_field
                 && DECL_BIT_FIELD_TYPE (rli->prev_field)
                 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
        {
          unsigned int type_align = TYPE_ALIGN (type);
          if (!is_bitfield && DECL_PACKED (field))
            type_align = desired_align;
          else
            type_align = MAX (type_align, desired_align);
          if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          rli->record_align = MAX (rli->record_align, type_align);
          rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
        }
    }
  else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
    {
      /* Named bit-fields cause the entire structure to have the
         alignment implied by their type.  Some targets also apply the same
         rules to unnamed bitfields.  */
      if (DECL_NAME (field) != 0
          || targetm.align_anon_bitfield ())
        {
          unsigned int type_align = TYPE_ALIGN (type);

#ifdef ADJUST_FIELD_ALIGN
          if (! TYPE_USER_ALIGN (type))
            type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
#endif

          /* Targets might choose to handle unnamed and hence possibly
             zero-width bitfields.  Those are not influenced by #pragmas
             or packed attributes.  */
          if (integer_zerop (DECL_SIZE (field)))
            {
              if (initial_max_fld_align)
                type_align = MIN (type_align,
                                  initial_max_fld_align * BITS_PER_UNIT);
            }
          else if (maximum_field_alignment != 0)
            type_align = MIN (type_align, maximum_field_alignment);
          else if (DECL_PACKED (field))
            type_align = MIN (type_align, BITS_PER_UNIT);

          /* The alignment of the record is increased to the maximum
             of the current alignment, the alignment indicated on the
             field (i.e., the alignment specified by an __aligned__
             attribute), and the alignment indicated by the type of
             the field.  */
          rli->record_align = MAX (rli->record_align, desired_align);
          rli->record_align = MAX (rli->record_align, type_align);

          if (warn_packed)
            rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
          user_align |= TYPE_USER_ALIGN (type);
        }
    }
  else
    {
      rli->record_align = MAX (rli->record_align, desired_align);
      rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
    }

  TYPE_USER_ALIGN (rli->t) |= user_align;

  return desired_align;
}

/* Issue a warning if the record alignment, RECORD_ALIGN, is less than
   the field alignment of FIELD or if FIELD's offset isn't aligned.  */

static void
handle_warn_if_not_align (tree field, unsigned int record_align)
{
  tree type = TREE_TYPE (field);

  if (type == error_mark_node)
    return;

  unsigned int warn_if_not_align = 0;

  int opt_w = 0;

  if (warn_if_not_aligned)
    {
      warn_if_not_align = DECL_WARN_IF_NOT_ALIGN (field);
      if (!warn_if_not_align)
        warn_if_not_align = TYPE_WARN_IF_NOT_ALIGN (type);
      if (warn_if_not_align)
        opt_w = OPT_Wif_not_aligned;
    }

  if (!warn_if_not_align
      && warn_packed_not_aligned
      && lookup_attribute ("aligned", TYPE_ATTRIBUTES (type)))
    {
      warn_if_not_align = TYPE_ALIGN (type);
      opt_w = OPT_Wpacked_not_aligned;
    }

  if (!warn_if_not_align)
    return;

  tree context = DECL_CONTEXT (field);

  warn_if_not_align /= BITS_PER_UNIT;
  record_align /= BITS_PER_UNIT;
  if ((record_align % warn_if_not_align) != 0)
    warning (opt_w, "alignment %u of %qT is less than %u",
             record_align, context, warn_if_not_align);

  tree off = byte_position (field);
  if (!multiple_of_p (TREE_TYPE (off), off, size_int (warn_if_not_align)))
    {
      if (TREE_CODE (off) == INTEGER_CST)
        warning (opt_w, "%q+D offset %E in %qT isn%'t aligned to %u",
                 field, off, context, warn_if_not_align);
      else
        warning (opt_w, "%q+D offset %E in %qT may not be aligned to %u",
                 field, off, context, warn_if_not_align);
    }
}

/* Called from place_field to handle unions.  */

static void
place_union_field (record_layout_info rli, tree field)
{
  update_alignment_for_field (rli, field, /*known_align=*/0);

  DECL_FIELD_OFFSET (field) = size_zero_node;
  DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
  SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);
  handle_warn_if_not_align (field, rli->record_align);

  /* If this is an ERROR_MARK return *after* having set the
     field at the start of the union.  This helps when parsing
     invalid fields.  */
  if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
    return;

  if (AGGREGATE_TYPE_P (TREE_TYPE (field))
      && TYPE_TYPELESS_STORAGE (TREE_TYPE (field)))
    TYPE_TYPELESS_STORAGE (rli->t) = 1;

  /* We assume the union's size will be a multiple of a byte so we don't
     bother with BITPOS.  */
  if (TREE_CODE (rli->t) == UNION_TYPE)
    rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
  else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
    rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field),
                               DECL_SIZE_UNIT (field), rli->offset);
}

/* A bitfield of SIZE with a required access alignment of ALIGN is allocated
   at BYTE_OFFSET / BIT_OFFSET.  Return nonzero if the field would span more
   units of alignment than the underlying TYPE.  */
static int
excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
                  HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
{
  /* Note that the calculation of OFFSET might overflow; we calculate it so
     that we still get the right result as long as ALIGN is a power of two.  */
  unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;

  offset = offset % align;
  return ((offset + size + align - 1) / align
          > tree_to_uhwi (TYPE_SIZE (type)) / align);
}
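
/* Worked example, with BITS_PER_UNIT == 8 (illustrative only): a 6-bit
   field at byte 3, bit 5 of a 32-bit type with 32-bit alignment starts
   at bit 29 of its 32-bit unit and would end at bit 35, occupying two
   units where the type itself occupies one, so the function returns
   nonzero and the caller advances to the next boundary.  */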
1244
1245 /* RLI contains information about the layout of a RECORD_TYPE. FIELD
1246 is a FIELD_DECL to be added after those fields already present in
1247 T. (FIELD is not actually added to the TYPE_FIELDS list here;
1248 callers that desire that behavior must manually perform that step.) */
1249
1250 void
1251 place_field (record_layout_info rli, tree field)
1252 {
1253 /* The alignment required for FIELD. */
1254 unsigned int desired_align;
1255 /* The alignment FIELD would have if we just dropped it into the
1256 record as it presently stands. */
1257 unsigned int known_align;
1258 unsigned int actual_align;
1259 /* The type of this field. */
1260 tree type = TREE_TYPE (field);
1261
1262 gcc_assert (TREE_CODE (field) != ERROR_MARK);
1263
1264 /* If FIELD is static, then treat it like a separate variable, not
1265 really like a structure field. If it is a FUNCTION_DECL, it's a
1266 method. In both cases, all we do is lay out the decl, and we do
1267 it *after* the record is laid out. */
1268 if (VAR_P (field))
1269 {
1270 vec_safe_push (rli->pending_statics, field);
1271 return;
1272 }
1273
1274 /* Enumerators and enum types which are local to this class need not
1275 be laid out. Likewise for initialized constant fields. */
1276 else if (TREE_CODE (field) != FIELD_DECL)
1277 return;
1278
1279 /* Unions are laid out very differently than records, so split
1280 that code off to another function. */
1281 else if (TREE_CODE (rli->t) != RECORD_TYPE)
1282 {
1283 place_union_field (rli, field);
1284 return;
1285 }
1286
1287 else if (TREE_CODE (type) == ERROR_MARK)
1288 {
1289 /* Place this field at the current allocation position, so we
1290 maintain monotonicity. */
1291 DECL_FIELD_OFFSET (field) = rli->offset;
1292 DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
1293 SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
1294 handle_warn_if_not_align (field, rli->record_align);
1295 return;
1296 }
1297
1298 if (AGGREGATE_TYPE_P (type)
1299 && TYPE_TYPELESS_STORAGE (type))
1300 TYPE_TYPELESS_STORAGE (rli->t) = 1;
1301
1302 /* Work out the known alignment so far. Note that A & (-A) is the
1303 value of the least-significant bit in A that is one. */
1304 if (! integer_zerop (rli->bitpos))
1305 known_align = least_bit_hwi (tree_to_uhwi (rli->bitpos));
1306 else if (integer_zerop (rli->offset))
1307 known_align = 0;
1308 else if (tree_fits_uhwi_p (rli->offset))
1309 known_align = (BITS_PER_UNIT
1310 * least_bit_hwi (tree_to_uhwi (rli->offset)));
1311 else
1312 known_align = rli->offset_align;
1313
1314 desired_align = update_alignment_for_field (rli, field, known_align);
1315 if (known_align == 0)
1316 known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
1317
1318 if (warn_packed && DECL_PACKED (field))
1319 {
1320 if (known_align >= TYPE_ALIGN (type))
1321 {
1322 if (TYPE_ALIGN (type) > desired_align)
1323 {
1324 if (STRICT_ALIGNMENT)
1325 warning (OPT_Wattributes, "packed attribute causes "
1326 "inefficient alignment for %q+D", field);
1327 /* Don't warn if DECL_PACKED was set by the type. */
1328 else if (!TYPE_PACKED (rli->t))
1329 warning (OPT_Wattributes, "packed attribute is "
1330 "unnecessary for %q+D", field);
1331 }
1332 }
1333 else
1334 rli->packed_maybe_necessary = 1;
1335 }
1336
1337 /* Does this field automatically have alignment it needs by virtue
1338 of the fields that precede it and the record's own alignment? */
1339 if (known_align < desired_align
1340 && (! targetm.ms_bitfield_layout_p (rli->t)
1341 || rli->prev_field == NULL))
1342 {
1343 /* No, we need to skip space before this field.
1344 Bump the cumulative size to multiple of field alignment. */
1345
1346 if (!targetm.ms_bitfield_layout_p (rli->t)
1347 && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION
1348 && !TYPE_ARTIFICIAL (rli->t))
1349 warning (OPT_Wpadded, "padding struct to align %q+D", field);
1350
1351 /* If the alignment is still within offset_align, just align
1352 the bit position. */
1353 if (desired_align < rli->offset_align)
1354 rli->bitpos = round_up (rli->bitpos, desired_align);
1355 else
1356 {
1357 /* First adjust OFFSET by the partial bits, then align. */
1358 rli->offset
1359 = size_binop (PLUS_EXPR, rli->offset,
1360 fold_convert (sizetype,
1361 size_binop (CEIL_DIV_EXPR, rli->bitpos,
1362 bitsize_unit_node)));
1363 rli->bitpos = bitsize_zero_node;
1364
1365 rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
1366 }
1367
1368 if (! TREE_CONSTANT (rli->offset))
1369 rli->offset_align = desired_align;
1370 }
1371
1372 /* Handle compatibility with PCC. Note that if the record has any
1373 variable-sized fields, we need not worry about compatibility. */
1374 if (PCC_BITFIELD_TYPE_MATTERS
1375 && ! targetm.ms_bitfield_layout_p (rli->t)
1376 && TREE_CODE (field) == FIELD_DECL
1377 && type != error_mark_node
1378 && DECL_BIT_FIELD (field)
1379 && (! DECL_PACKED (field)
1380 /* Enter for these packed fields only to issue a warning. */
1381 || TYPE_ALIGN (type) <= BITS_PER_UNIT)
1382 && maximum_field_alignment == 0
1383 && ! integer_zerop (DECL_SIZE (field))
1384 && tree_fits_uhwi_p (DECL_SIZE (field))
1385 && tree_fits_uhwi_p (rli->offset)
1386 && tree_fits_uhwi_p (TYPE_SIZE (type)))
1387 {
1388 unsigned int type_align = TYPE_ALIGN (type);
1389 tree dsize = DECL_SIZE (field);
1390 HOST_WIDE_INT field_size = tree_to_uhwi (dsize);
1391 HOST_WIDE_INT offset = tree_to_uhwi (rli->offset);
1392 HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos);
1393
1394 #ifdef ADJUST_FIELD_ALIGN
1395 if (! TYPE_USER_ALIGN (type))
1396 type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
1397 #endif
1398
1399 /* A bit field may not span more units of alignment of its type
1400 than its type itself. Advance to next boundary if necessary. */
1401 if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
1402 {
1403 if (DECL_PACKED (field))
1404 {
1405 if (warn_packed_bitfield_compat == 1)
1406 inform
1407 (input_location,
1408 "offset of packed bit-field %qD has changed in GCC 4.4",
1409 field);
1410 }
1411 else
1412 rli->bitpos = round_up (rli->bitpos, type_align);
1413 }
1414
1415 if (! DECL_PACKED (field))
1416 TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
1417
1418 SET_TYPE_WARN_IF_NOT_ALIGN (rli->t,
1419 TYPE_WARN_IF_NOT_ALIGN (type));
1420 }
1421
1422 #ifdef BITFIELD_NBYTES_LIMITED
1423 if (BITFIELD_NBYTES_LIMITED
1424 && ! targetm.ms_bitfield_layout_p (rli->t)
1425 && TREE_CODE (field) == FIELD_DECL
1426 && type != error_mark_node
1427 && DECL_BIT_FIELD_TYPE (field)
1428 && ! DECL_PACKED (field)
1429 && ! integer_zerop (DECL_SIZE (field))
1430 && tree_fits_uhwi_p (DECL_SIZE (field))
1431 && tree_fits_uhwi_p (rli->offset)
1432 && tree_fits_uhwi_p (TYPE_SIZE (type)))
1433 {
1434 unsigned int type_align = TYPE_ALIGN (type);
1435 tree dsize = DECL_SIZE (field);
1436 HOST_WIDE_INT field_size = tree_to_uhwi (dsize);
1437 HOST_WIDE_INT offset = tree_to_uhwi (rli->offset);
1438 HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos);
1439
1440 #ifdef ADJUST_FIELD_ALIGN
1441 if (! TYPE_USER_ALIGN (type))
1442 type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
1443 #endif
1444
1445 if (maximum_field_alignment != 0)
1446 type_align = MIN (type_align, maximum_field_alignment);
1447 /* ??? This test is opposite the test in the containing if
1448 statement, so this code is unreachable currently. */
1449 else if (DECL_PACKED (field))
1450 type_align = MIN (type_align, BITS_PER_UNIT);
1451
1452 /* A bit field may not span the unit of alignment of its type.
1453 Advance to next boundary if necessary. */
1454 if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
1455 rli->bitpos = round_up (rli->bitpos, type_align);
1456
1457 TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
1458 SET_TYPE_WARN_IF_NOT_ALIGN (rli->t,
1459 TYPE_WARN_IF_NOT_ALIGN (type));
1460 }
1461 #endif
1462
1463 /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
1464 A subtlety:
1465 When a bit field is inserted into a packed record, the whole
1466 size of the underlying type is used by one or more same-size
1467 adjacent bitfields. (That is, if its long:3, 32 bits is
1468 used in the record, and any additional adjacent long bitfields are
1469 packed into the same chunk of 32 bits. However, if the size
1470 changes, a new field of that size is allocated.) In an unpacked
1471 record, this is the same as using alignment, but not equivalent
1472 when packing.
1473
1474 Note: for compatibility, we use the type size, not the type alignment
1475 to determine alignment, since that matches the documentation */
1476
1477 if (targetm.ms_bitfield_layout_p (rli->t))
1478 {
1479 tree prev_saved = rli->prev_field;
1480 tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;
1481
1482 /* This is a bitfield if it exists. */
1483 if (rli->prev_field)
1484 {
1485 bool realign_p = known_align < desired_align;
1486
1487 /* If both are bitfields, nonzero, and the same size, this is
1488 the middle of a run. Zero declared size fields are special
1489 and handled as "end of run". (Note: it's nonzero declared
1490 size, but equal type sizes!) (Since we know that both
1491 the current and previous fields are bitfields by the
1492 time we check it, DECL_SIZE must be present for both.) */
1493 if (DECL_BIT_FIELD_TYPE (field)
1494 && !integer_zerop (DECL_SIZE (field))
1495 && !integer_zerop (DECL_SIZE (rli->prev_field))
1496 && tree_fits_shwi_p (DECL_SIZE (rli->prev_field))
1497 && tree_fits_uhwi_p (TYPE_SIZE (type))
1498 && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
1499 {
1500 /* We're in the middle of a run of equal type size fields; make
1501 sure we realign if we run out of bits. (Not decl size,
1502 type size!) */
1503 HOST_WIDE_INT bitsize = tree_to_uhwi (DECL_SIZE (field));
1504
1505 if (rli->remaining_in_alignment < bitsize)
1506 {
1507 HOST_WIDE_INT typesize = tree_to_uhwi (TYPE_SIZE (type));
1508
1509 /* out of bits; bump up to next 'word'. */
1510 rli->bitpos
1511 = size_binop (PLUS_EXPR, rli->bitpos,
1512 bitsize_int (rli->remaining_in_alignment));
1513 rli->prev_field = field;
1514 if (typesize < bitsize)
1515 rli->remaining_in_alignment = 0;
1516 else
1517 rli->remaining_in_alignment = typesize - bitsize;
1518 }
1519 else
1520 {
1521 rli->remaining_in_alignment -= bitsize;
1522 realign_p = false;
1523 }
1524 }
1525 else
1526 {
1527 /* End of a run: if leaving a run of bitfields of the same type
1528 size, we have to "use up" the rest of the bits of the type
1529 size.
1530
1531 Compute the new position as the sum of the size for the prior
1532 type and where we first started working on that type.
1533 Note: since the beginning of the field was aligned then
1534 of course the end will be too. No round needed. */
1535
1536 if (!integer_zerop (DECL_SIZE (rli->prev_field)))
1537 {
1538 rli->bitpos
1539 = size_binop (PLUS_EXPR, rli->bitpos,
1540 bitsize_int (rli->remaining_in_alignment));
1541 }
1542 else
1543 /* We "use up" size zero fields; the code below should behave
1544 as if the prior field was not a bitfield. */
1545 prev_saved = NULL;
1546
1547 /* Cause a new bitfield to be captured, either this time (if
1548 currently a bitfield) or next time we see one. */
1549 if (!DECL_BIT_FIELD_TYPE (field)
1550 || integer_zerop (DECL_SIZE (field)))
1551 rli->prev_field = NULL;
1552 }
1553
1554 /* Does this field automatically have alignment it needs by virtue
1555 of the fields that precede it and the record's own alignment? */
1556 if (realign_p)
1557 {
1558 /* If the alignment is still within offset_align, just align
1559 the bit position. */
1560 if (desired_align < rli->offset_align)
1561 rli->bitpos = round_up (rli->bitpos, desired_align);
1562 else
1563 {
1564 /* First adjust OFFSET by the partial bits, then align. */
1565 tree d = size_binop (CEIL_DIV_EXPR, rli->bitpos,
1566 bitsize_unit_node);
1567 rli->offset = size_binop (PLUS_EXPR, rli->offset,
1568 fold_convert (sizetype, d));
1569 rli->bitpos = bitsize_zero_node;
1570
1571 rli->offset = round_up (rli->offset,
1572 desired_align / BITS_PER_UNIT);
1573 }
1574
1575 if (! TREE_CONSTANT (rli->offset))
1576 rli->offset_align = desired_align;
1577 }
1578
1579 normalize_rli (rli);
1580 }
1581
1582 /* If we're starting a new run of same type size bitfields
1583 (or a run of non-bitfields), set up the "first of the run"
1584 fields.
1585
1586 That is, if the current field is not a bitfield, or if there
1587 was a prior bitfield the type sizes differ, or if there wasn't
1588 a prior bitfield the size of the current field is nonzero.
1589
1590 Note: we must be sure to test ONLY the type size if there was
1591 a prior bitfield and ONLY for the current field being zero if
1592 there wasn't. */
1593
1594 if (!DECL_BIT_FIELD_TYPE (field)
1595 || (prev_saved != NULL
1596 ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
1597 : !integer_zerop (DECL_SIZE (field))))
1598 {
1599 /* Never smaller than a byte for compatibility. */
1600 unsigned int type_align = BITS_PER_UNIT;
1601
1602 /* (When not a bitfield), we could be seeing a flex array (with
1603 no DECL_SIZE). Since we won't be using remaining_in_alignment
1604 until we see a bitfield (and come by here again) we just skip
1605 calculating it. */
1606 if (DECL_SIZE (field) != NULL
1607 && tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (field)))
1608 && tree_fits_uhwi_p (DECL_SIZE (field)))
1609 {
1610 unsigned HOST_WIDE_INT bitsize
1611 = tree_to_uhwi (DECL_SIZE (field));
1612 unsigned HOST_WIDE_INT typesize
1613 = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field)));
1614
1615 if (typesize < bitsize)
1616 rli->remaining_in_alignment = 0;
1617 else
1618 rli->remaining_in_alignment = typesize - bitsize;
1619 }
1620
1621 /* Now align (conventionally) for the new type. */
1622 if (! DECL_PACKED (field))
1623 type_align = TYPE_ALIGN (TREE_TYPE (field));
1624
1625 if (maximum_field_alignment != 0)
1626 type_align = MIN (type_align, maximum_field_alignment);
1627
1628 rli->bitpos = round_up (rli->bitpos, type_align);
1629
1630 /* If we really aligned, don't allow subsequent bitfields
1631 to undo that. */
1632 rli->prev_field = NULL;
1633 }
1634 }
1635
1636 /* Offset so far becomes the position of this field after normalizing. */
1637 normalize_rli (rli);
1638 DECL_FIELD_OFFSET (field) = rli->offset;
1639 DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
1640 SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
1641 handle_warn_if_not_align (field, rli->record_align);
1642
1643 /* Evaluate nonconstant offsets only once, either now or as soon as safe. */
1644 if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST)
1645 DECL_FIELD_OFFSET (field) = variable_size (DECL_FIELD_OFFSET (field));
1646
1647 /* If this field ended up more aligned than we thought it would be (we
1648 approximate this by seeing if its position changed), lay out the field
1649 again; perhaps we can use an integral mode for it now. */
1650 if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
1651 actual_align = least_bit_hwi (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)));
1652 else if (integer_zerop (DECL_FIELD_OFFSET (field)))
1653 actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
1654 else if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
1655 actual_align = (BITS_PER_UNIT
1656 * least_bit_hwi (tree_to_uhwi (DECL_FIELD_OFFSET (field))));
1657 else
1658 actual_align = DECL_OFFSET_ALIGN (field);
1659 /* ACTUAL_ALIGN is still the actual alignment *within the record*.
1660 Store/extract bit-field operations will check the alignment of the
1661 record against the mode of bit fields. */
1662
1663 if (known_align != actual_align)
1664 layout_decl (field, actual_align);
1665
1666 if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
1667 rli->prev_field = field;
1668
1669 /* Now add size of this field to the size of the record. If the size is
1670 not constant, treat the field as being a multiple of bytes and just
1671 adjust the offset, resetting the bit position. Otherwise, apportion the
1672 size amongst the bit position and offset. First handle the case of an
1673 unspecified size, which can happen when we have an invalid nested struct
1674 definition, such as struct j { struct j { int i; } }. The error message
1675 is printed in finish_struct. */
1676 if (DECL_SIZE (field) == 0)
1677 /* Do nothing. */;
1678 else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
1679 || TREE_OVERFLOW (DECL_SIZE (field)))
1680 {
1681 rli->offset
1682 = size_binop (PLUS_EXPR, rli->offset,
1683 fold_convert (sizetype,
1684 size_binop (CEIL_DIV_EXPR, rli->bitpos,
1685 bitsize_unit_node)));
1686 rli->offset
1687 = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
1688 rli->bitpos = bitsize_zero_node;
1689 rli->offset_align = MIN (rli->offset_align, desired_align);
1690
1691 if (!multiple_of_p (bitsizetype, DECL_SIZE (field),
1692 bitsize_int (rli->offset_align)))
1693 {
1694 tree type = strip_array_types (TREE_TYPE (field));
1695 /* The above adjusts offset_align just based on the start of the
1696 field. The field might not have a size that is a multiple of
1697 that offset_align though. If the field is an array of fixed
1698 sized elements, assume there can be any multiple of those
1699 sizes. If it is a variable length aggregate or array of
1700 variable length aggregates, assume worst that the end is
1701 just BITS_PER_UNIT aligned. */
1702 if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
1703 {
1704 if (TREE_INT_CST_LOW (TYPE_SIZE (type)))
1705 {
1706 unsigned HOST_WIDE_INT sz
1707 = least_bit_hwi (TREE_INT_CST_LOW (TYPE_SIZE (type)));
1708 rli->offset_align = MIN (rli->offset_align, sz);
1709 }
1710 }
1711 else
1712 rli->offset_align = MIN (rli->offset_align, BITS_PER_UNIT);
1713 }
1714 }
1715 else if (targetm.ms_bitfield_layout_p (rli->t))
1716 {
1717 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
1718
1719 /* If FIELD is the last field and doesn't end at the full length
1720 of the type then pad the struct out to the full length of the
1721 last type. */
1722 if (DECL_BIT_FIELD_TYPE (field)
1723 && !integer_zerop (DECL_SIZE (field)))
1724 {
1725 /* We have to scan, because non-field DECLS are also here. */
1726 tree probe = field;
1727 while ((probe = DECL_CHAIN (probe)))
1728 if (TREE_CODE (probe) == FIELD_DECL)
1729 break;
1730 if (!probe)
1731 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
1732 bitsize_int (rli->remaining_in_alignment));
1733 }
1734
1735 normalize_rli (rli);
1736 }
1737 else
1738 {
1739 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
1740 normalize_rli (rli);
1741 }
1742 }
1743
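/* A hedged user-level sketch (assuming a typical LP64 target such as
   x86_64-linux-gnu; none of this is promised by the code above) of the
   positions place_field computes: A occupies bits [0,6] of the first
   unit, B is packed immediately after it at bit 7, and C is bumped to
   the next 4-byte boundary.  */

#include <stddef.h>

struct place_field_example
{
  unsigned a : 7;   /* DECL_FIELD_BIT_OFFSET == 0 */
  unsigned b : 9;   /* DECL_FIELD_BIT_OFFSET == 7 */
  int c;            /* DECL_FIELD_OFFSET == 4     */
};

_Static_assert (offsetof (struct place_field_example, c) == 4,
                "c starts at byte 4");
_Static_assert (sizeof (struct place_field_example) == 8,
                "record padded to int alignment");
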
1744 /* Assuming that all the fields have been laid out, this function uses
1745 RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
1746 indicated by RLI. */
1747
1748 static void
1749 finalize_record_size (record_layout_info rli)
1750 {
1751 tree unpadded_size, unpadded_size_unit;
1752
1753 /* Now we want just byte and bit offsets, so set the offset alignment
1754 to be a byte and then normalize. */
1755 rli->offset_align = BITS_PER_UNIT;
1756 normalize_rli (rli);
1757
1758 /* Determine the desired alignment. */
1759 #ifdef ROUND_TYPE_ALIGN
1760 SET_TYPE_ALIGN (rli->t, ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
1761 rli->record_align));
1762 #else
1763 SET_TYPE_ALIGN (rli->t, MAX (TYPE_ALIGN (rli->t), rli->record_align));
1764 #endif
1765
1766 /* Compute the size so far. Be sure to allow for extra bits in the
1767 size in bytes. We have guaranteed above that it will be no more
1768 than a single byte. */
1769 unpadded_size = rli_size_so_far (rli);
1770 unpadded_size_unit = rli_size_unit_so_far (rli);
1771 if (! integer_zerop (rli->bitpos))
1772 unpadded_size_unit
1773 = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);
1774
1775 /* Round the size up to be a multiple of the required alignment. */
1776 TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
1777 TYPE_SIZE_UNIT (rli->t)
1778 = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));
1779
1780 if (TREE_CONSTANT (unpadded_size)
1781 && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
1782 && input_location != BUILTINS_LOCATION
1783 && !TYPE_ARTIFICIAL (rli->t))
1784 warning (OPT_Wpadded, "padding struct size to alignment boundary");
1785
1786 if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
1787 && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
1788 && TREE_CONSTANT (unpadded_size))
1789 {
1790 tree unpacked_size;
1791
1792 #ifdef ROUND_TYPE_ALIGN
1793 rli->unpacked_align
1794 = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
1795 #else
1796 rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
1797 #endif
1798
1799 unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
1800 if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
1801 {
1802 if (TYPE_NAME (rli->t))
1803 {
1804 tree name;
1805
1806 if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
1807 name = TYPE_NAME (rli->t);
1808 else
1809 name = DECL_NAME (TYPE_NAME (rli->t));
1810
1811 if (STRICT_ALIGNMENT)
1812 warning (OPT_Wpacked, "packed attribute causes inefficient "
1813 "alignment for %qE", name);
1814 else
1815 warning (OPT_Wpacked,
1816 "packed attribute is unnecessary for %qE", name);
1817 }
1818 else
1819 {
1820 if (STRICT_ALIGNMENT)
1821 warning (OPT_Wpacked,
1822 "packed attribute causes inefficient alignment");
1823 else
1824 warning (OPT_Wpacked, "packed attribute is unnecessary");
1825 }
1826 }
1827 }
1828 }
1829
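/* A hedged illustration of the round_up of TYPE_SIZE above, assuming a
   target where int is 4 bytes and 4-byte aligned: the unpadded size of
   this record is 5 bytes, which finalize_record_size rounds up to the
   record alignment, and -Wpadded reports "padding struct size to
   alignment boundary".  */

struct finalize_size_example
{
  int i;    /* bytes 0-3 */
  char c;   /* byte 4; bytes 5-7 become tail padding */
};

_Static_assert (sizeof (struct finalize_size_example) == 8,
                "size rounded up to a multiple of the alignment");
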
1830 /* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE). */
1831
1832 void
1833 compute_record_mode (tree type)
1834 {
1835 tree field;
1836 machine_mode mode = VOIDmode;
1837
1838 /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
1839 However, if possible, we use a mode that fits in a register
1840 instead, in order to allow for better optimization down the
1841 line. */
1842 SET_TYPE_MODE (type, BLKmode);
1843
1844 poly_uint64 type_size;
1845 if (!poly_int_tree_p (TYPE_SIZE (type), &type_size))
1846 return;
1847
1848 /* A record which has any BLKmode members must itself be
1849 BLKmode; it can't go in a register. Unless the member is
1850 BLKmode only because it isn't aligned. */
1851 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
1852 {
1853 if (TREE_CODE (field) != FIELD_DECL)
1854 continue;
1855
1856 poly_uint64 field_size;
1857 if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
1858 || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
1859 && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
1860 && !(TYPE_SIZE (TREE_TYPE (field)) != 0
1861 && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
1862 || !tree_fits_poly_uint64_p (bit_position (field))
1863 || DECL_SIZE (field) == 0
1864 || !poly_int_tree_p (DECL_SIZE (field), &field_size))
1865 return;
1866
1867 /* If this field is the whole struct, remember its mode so
1868 that, say, we can put a double in a class into a DF
1869 register instead of forcing it to live in the stack. */
1870 if (known_eq (field_size, type_size)
1871 /* Partial int types (e.g. __int20) may have TYPE_SIZE equal to
1872 wider types (e.g. int32), despite precision being less. Ensure
1873 that the TYPE_MODE of the struct does not get set to the partial
1874 int mode if there is a wider type also in the struct. */
1875 && known_gt (GET_MODE_PRECISION (DECL_MODE (field)),
1876 GET_MODE_PRECISION (mode)))
1877 mode = DECL_MODE (field);
1878
1879 /* With some targets, it is sub-optimal to access an aligned
1880 BLKmode structure as a scalar. */
1881 if (targetm.member_type_forces_blk (field, mode))
1882 return;
1883 }
1884
1885 /* If we have only one real field, use its mode if that mode's size
1886 matches the type's size. This generally only applies to RECORD_TYPE.
1887 For UNION_TYPE, if the widest field is MODE_INT then use that mode.
1888 If the widest field is MODE_PARTIAL_INT, and the union will be passed
1889 by reference, then use that mode. */
1890 if ((TREE_CODE (type) == RECORD_TYPE
1891 || (TREE_CODE (type) == UNION_TYPE
1892 && (GET_MODE_CLASS (mode) == MODE_INT
1893 || (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT
1894 && (targetm.calls.pass_by_reference
1895 (pack_cumulative_args (0),
1896 function_arg_info (type, mode, /*named=*/false)))))))
1897 && mode != VOIDmode
1898 && known_eq (GET_MODE_BITSIZE (mode), type_size))
1899 ;
1900 else
1901 mode = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1).else_blk ();
1902
1903 /* If structure's known alignment is less than what the scalar
1904 mode would need, and it matters, then stick with BLKmode. */
1905 if (mode != BLKmode
1906 && STRICT_ALIGNMENT
1907 && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
1908 || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (mode)))
1909 {
1910 /* If this is the only reason this type is BLKmode, then
1911 don't force containing types to be BLKmode. */
1912 TYPE_NO_FORCE_BLK (type) = 1;
1913 mode = BLKmode;
1914 }
1915
1916 SET_TYPE_MODE (type, mode);
1917 }
1918
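/* A hedged illustration of the "field is the whole struct" case above:
   on a target with double-precision floating-point registers, this
   record's only field spans the entire type, so compute_record_mode
   gives it DFmode instead of BLKmode and values of this type can be
   kept in a register rather than forced onto the stack.  */

struct record_mode_example
{
  double d;   /* field size == type size, so TYPE_MODE becomes DFmode */
};
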
1919 /* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
1920 out. */
1921
1922 static void
1923 finalize_type_size (tree type)
1924 {
1925 /* Normally, use the alignment corresponding to the mode chosen.
1926 However, where strict alignment is not required, avoid
1927 over-aligning structures, since most compilers do not do this
1928 alignment. */
1929 if (TYPE_MODE (type) != BLKmode
1930 && TYPE_MODE (type) != VOIDmode
1931 && (STRICT_ALIGNMENT || !AGGREGATE_TYPE_P (type)))
1932 {
1933 unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));
1934
1935 /* Don't override a larger alignment requirement coming from a user
1936 alignment of one of the fields. */
1937 if (mode_align >= TYPE_ALIGN (type))
1938 {
1939 SET_TYPE_ALIGN (type, mode_align);
1940 TYPE_USER_ALIGN (type) = 0;
1941 }
1942 }
1943
1944 /* Do machine-dependent extra alignment. */
1945 #ifdef ROUND_TYPE_ALIGN
1946 SET_TYPE_ALIGN (type,
1947 ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT));
1948 #endif
1949
1950 /* If we failed to find a simple way to calculate the unit size
1951 of the type, find it by division. */
1952 if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
1953 /* TYPE_SIZE (type) is computed in bitsizetype. After the division, the
1954 result will fit in sizetype. We will get more efficient code using
1955 sizetype, so we force a conversion. */
1956 TYPE_SIZE_UNIT (type)
1957 = fold_convert (sizetype,
1958 size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
1959 bitsize_unit_node));
1960
1961 if (TYPE_SIZE (type) != 0)
1962 {
1963 TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
1964 TYPE_SIZE_UNIT (type)
1965 = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type));
1966 }
1967
1968 /* Evaluate nonconstant sizes only once, either now or as soon as safe. */
1969 if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
1970 TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
1971 if (TYPE_SIZE_UNIT (type) != 0
1972 && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
1973 TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));
1974
1975 /* Handle empty records as per the x86-64 psABI. */
1976 TYPE_EMPTY_P (type) = targetm.calls.empty_record_p (type);
1977
1978 /* Also layout any other variants of the type. */
1979 if (TYPE_NEXT_VARIANT (type)
1980 || type != TYPE_MAIN_VARIANT (type))
1981 {
1982 tree variant;
1983 /* Record layout info of this variant. */
1984 tree size = TYPE_SIZE (type);
1985 tree size_unit = TYPE_SIZE_UNIT (type);
1986 unsigned int align = TYPE_ALIGN (type);
1987 unsigned int precision = TYPE_PRECISION (type);
1988 unsigned int user_align = TYPE_USER_ALIGN (type);
1989 machine_mode mode = TYPE_MODE (type);
1990 bool empty_p = TYPE_EMPTY_P (type);
1991
1992 /* Copy it into all variants. */
1993 for (variant = TYPE_MAIN_VARIANT (type);
1994 variant != 0;
1995 variant = TYPE_NEXT_VARIANT (variant))
1996 {
1997 TYPE_SIZE (variant) = size;
1998 TYPE_SIZE_UNIT (variant) = size_unit;
1999 unsigned valign = align;
2000 if (TYPE_USER_ALIGN (variant))
2001 valign = MAX (valign, TYPE_ALIGN (variant));
2002 else
2003 TYPE_USER_ALIGN (variant) = user_align;
2004 SET_TYPE_ALIGN (variant, valign);
2005 TYPE_PRECISION (variant) = precision;
2006 SET_TYPE_MODE (variant, mode);
2007 TYPE_EMPTY_P (variant) = empty_p;
2008 }
2009 }
2010 }
2011
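/* A hedged sketch of the final rounding in finalize_type_size: the
   user alignment raises TYPE_ALIGN to 16 bytes, and TYPE_SIZE and
   TYPE_SIZE_UNIT are then rounded up to a multiple of it.  */

struct finalize_type_size_example
{
  char c;
} __attribute__ ((aligned (16)));

_Static_assert (sizeof (struct finalize_type_size_example) == 16,
                "size rounded up to the requested alignment");
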
2012 /* Return a new underlying object for a bitfield started with FIELD. */
2013
2014 static tree
2015 start_bitfield_representative (tree field)
2016 {
2017 tree repr = make_node (FIELD_DECL);
2018 DECL_FIELD_OFFSET (repr) = DECL_FIELD_OFFSET (field);
2019 /* Force the representative to begin at a BITS_PER_UNIT aligned
2020 boundary - C++ may use tail-padding of a base object to
2021 continue packing bits so the bitfield region does not start
2022 at bit zero (see g++.dg/abi/bitfield5.C for example).
2023 Unallocated bits may happen for other reasons as well,
2024 for example Ada which allows explicit bit-granular structure layout. */
2025 DECL_FIELD_BIT_OFFSET (repr)
2026 = size_binop (BIT_AND_EXPR,
2027 DECL_FIELD_BIT_OFFSET (field),
2028 bitsize_int (~(BITS_PER_UNIT - 1)));
2029 SET_DECL_OFFSET_ALIGN (repr, DECL_OFFSET_ALIGN (field));
2030 DECL_SIZE (repr) = DECL_SIZE (field);
2031 DECL_SIZE_UNIT (repr) = DECL_SIZE_UNIT (field);
2032 DECL_PACKED (repr) = DECL_PACKED (field);
2033 DECL_CONTEXT (repr) = DECL_CONTEXT (field);
2034 /* There are no indirect accesses to this field. If we introduce
2035 some then they have to use the record alias set. This makes
2036 sure to properly conflict with [indirect] accesses to addressable
2037 fields of the bitfield group. */
2038 DECL_NONADDRESSABLE_P (repr) = 1;
2039 return repr;
2040 }
2041
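/* A hedged arithmetic sketch of the BIT_AND_EXPR above, assuming
   BITS_PER_UNIT == 8: a bitfield whose DECL_FIELD_BIT_OFFSET is 19
   gets a representative whose bit offset is rounded down to 16, the
   enclosing byte boundary.  */

static unsigned int
example_repr_start_bit (unsigned int field_bit_offset)
{
  return field_bit_offset & ~(8u - 1);   /* e.g. 19 -> 16 */
}
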
2042 /* Finish up a bitfield group that was started by creating the underlying
2043 object REPR with the last field in the bitfield group FIELD. */
2044
2045 static void
2046 finish_bitfield_representative (tree repr, tree field)
2047 {
2048 unsigned HOST_WIDE_INT bitsize, maxbitsize;
2049 tree nextf, size;
2050
2051 size = size_diffop (DECL_FIELD_OFFSET (field),
2052 DECL_FIELD_OFFSET (repr));
2053 while (TREE_CODE (size) == COMPOUND_EXPR)
2054 size = TREE_OPERAND (size, 1);
2055 gcc_assert (tree_fits_uhwi_p (size));
2056 bitsize = (tree_to_uhwi (size) * BITS_PER_UNIT
2057 + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
2058 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))
2059 + tree_to_uhwi (DECL_SIZE (field)));
2060
2061 /* Round up bitsize to multiples of BITS_PER_UNIT. */
2062 bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
2063
2064 /* Now nothing tells us how to pad out bitsize ... */
2065 nextf = DECL_CHAIN (field);
2066 while (nextf && TREE_CODE (nextf) != FIELD_DECL)
2067 nextf = DECL_CHAIN (nextf);
2068 if (nextf)
2069 {
2070 tree maxsize;
2071 /* If there was an error, the field may be not laid out
2072 correctly. Don't bother to do anything. */
2073 if (TREE_TYPE (nextf) == error_mark_node)
2074 return;
2075 maxsize = size_diffop (DECL_FIELD_OFFSET (nextf),
2076 DECL_FIELD_OFFSET (repr));
2077 if (tree_fits_uhwi_p (maxsize))
2078 {
2079 maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT
2080 + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (nextf))
2081 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
2082 /* If the group ends within a bitfield, nextf does not need to be
2083 aligned to BITS_PER_UNIT; thus round up. */
2084 maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
2085 }
2086 else
2087 maxbitsize = bitsize;
2088 }
2089 else
2090 {
2091 /* Note that if the C++ FE sets up tail-padding to be re-used, it
2092 creates an as-base variant of the type with TYPE_SIZE adjusted
2093 accordingly. So it is safe to include tail-padding here. */
2094 tree aggsize = lang_hooks.types.unit_size_without_reusable_padding
2095 (DECL_CONTEXT (field));
2096 tree maxsize = size_diffop (aggsize, DECL_FIELD_OFFSET (repr));
2097 /* We cannot generally rely on maxsize to fold to an integer constant,
2098 so use bitsize as fallback for this case. */
2099 if (tree_fits_uhwi_p (maxsize))
2100 maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT
2101 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
2102 else
2103 maxbitsize = bitsize;
2104 }
2105
2106 /* This holds only if we don't artificially break up the
2107 representative in the middle of a large bitfield with different,
2108 possibly overlapping representatives, and all representatives
2109 start at a byte offset. */
2110 gcc_assert (maxbitsize % BITS_PER_UNIT == 0);
2111
2112 /* Find the smallest nice mode to use. */
2113 opt_scalar_int_mode mode_iter;
2114 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
2115 if (GET_MODE_BITSIZE (mode_iter.require ()) >= bitsize)
2116 break;
2117
2118 scalar_int_mode mode;
2119 if (!mode_iter.exists (&mode)
2120 || GET_MODE_BITSIZE (mode) > maxbitsize
2121 || GET_MODE_BITSIZE (mode) > MAX_FIXED_MODE_SIZE)
2122 {
2123 /* We really want a BLKmode representative only as a last resort,
2124 considering the member b in
2125 struct { int a : 7; int b : 17; int c; } __attribute__((packed));
2126 Otherwise we simply want to split the representative up
2127 allowing for overlaps within the bitfield region as required for
2128 struct { int a : 7; int b : 7;
2129 int c : 10; int d; } __attribute__((packed));
2130 [0, 15] HImode for a and b, [8, 23] HImode for c. */
2131 DECL_SIZE (repr) = bitsize_int (bitsize);
2132 DECL_SIZE_UNIT (repr) = size_int (bitsize / BITS_PER_UNIT);
2133 SET_DECL_MODE (repr, BLKmode);
2134 TREE_TYPE (repr) = build_array_type_nelts (unsigned_char_type_node,
2135 bitsize / BITS_PER_UNIT);
2136 }
2137 else
2138 {
2139 unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (mode);
2140 DECL_SIZE (repr) = bitsize_int (modesize);
2141 DECL_SIZE_UNIT (repr) = size_int (modesize / BITS_PER_UNIT);
2142 SET_DECL_MODE (repr, mode);
2143 TREE_TYPE (repr) = lang_hooks.types.type_for_mode (mode, 1);
2144 }
2145
2146 /* Remember whether the bitfield group is at the end of the
2147 structure or not. */
2148 DECL_CHAIN (repr) = nextf;
2149 }
2150
2151 /* Compute and set FIELD_DECLs for the underlying objects we should
2152 use for bitfield access for the structure T. */
2153
2154 void
2155 finish_bitfield_layout (tree t)
2156 {
2157 tree field, prev;
2158 tree repr = NULL_TREE;
2159
2160 /* Unions would be special: for the ease of type-punning optimizations
2161 we could use the underlying type as a hint for the representative,
2162 if the bitfield would fit and the representative would not exceed
2163 the union in size. */
2164 if (TREE_CODE (t) != RECORD_TYPE)
2165 return;
2166
2167 for (prev = NULL_TREE, field = TYPE_FIELDS (t);
2168 field; field = DECL_CHAIN (field))
2169 {
2170 if (TREE_CODE (field) != FIELD_DECL)
2171 continue;
2172
2173 /* In the C++ memory model, consecutive bit fields in a structure are
2174 considered one memory location and updating a memory location
2175 may not store into adjacent memory locations. */
2176 if (!repr
2177 && DECL_BIT_FIELD_TYPE (field))
2178 {
2179 /* Start new representative. */
2180 repr = start_bitfield_representative (field);
2181 }
2182 else if (repr
2183 && ! DECL_BIT_FIELD_TYPE (field))
2184 {
2185 /* Finish off new representative. */
2186 finish_bitfield_representative (repr, prev);
2187 repr = NULL_TREE;
2188 }
2189 else if (DECL_BIT_FIELD_TYPE (field))
2190 {
2191 gcc_assert (repr != NULL_TREE);
2192
2193 /* Zero-size bitfields finish off a representative and
2194 do not have a representative themselves. This is
2195 required by the C++ memory model. */
2196 if (integer_zerop (DECL_SIZE (field)))
2197 {
2198 finish_bitfield_representative (repr, prev);
2199 repr = NULL_TREE;
2200 }
2201
2202 /* We assume that either DECL_FIELD_OFFSET of the representative
2203 and each bitfield member is a constant or they are equal.
2204 This is because we need to be able to compute the bit-offset
2205 of each field relative to the representative in get_bit_range
2206 during RTL expansion.
2207 If these constraints are not met, simply force a new
2208 representative to be generated. That will at most
2209 generate worse code but still maintain correctness with
2210 respect to the C++ memory model. */
2211 else if (!((tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr))
2212 && tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
2213 || operand_equal_p (DECL_FIELD_OFFSET (repr),
2214 DECL_FIELD_OFFSET (field), 0)))
2215 {
2216 finish_bitfield_representative (repr, prev);
2217 repr = start_bitfield_representative (field);
2218 }
2219 }
2220 else
2221 continue;
2222
2223 if (repr)
2224 DECL_BIT_FIELD_REPRESENTATIVE (field) = repr;
2225
2226 prev = field;
2227 }
2228
2229 if (repr)
2230 finish_bitfield_representative (repr, prev);
2231 }
2232
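/* A hedged illustration of the zero-size bitfield rule above: the
   unnamed `: 0' member finishes the first representative, so under the
   C++ memory model A and B end up in distinct memory locations and a
   store to one may not touch the other.  */

struct bitfield_layout_example
{
  int a : 8;   /* first representative        */
  int   : 0;   /* ends the first group        */
  int b : 8;   /* starts a new representative */
};
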
2233 /* Do all of the work required to layout the type indicated by RLI,
2234 once the fields have been laid out. This function will call `free'
2235 for RLI, unless FREE_P is false. Passing a value other than true
2236 for FREE_P is bad practice; this option only exists to support the
2237 G++ 3.2 ABI. */
2238
2239 void
2240 finish_record_layout (record_layout_info rli, int free_p)
2241 {
2242 tree variant;
2243
2244 /* Compute the final size. */
2245 finalize_record_size (rli);
2246
2247 /* Compute the TYPE_MODE for the record. */
2248 compute_record_mode (rli->t);
2249
2250 /* Perform any last tweaks to the TYPE_SIZE, etc. */
2251 finalize_type_size (rli->t);
2252
2253 /* Compute bitfield representatives. */
2254 finish_bitfield_layout (rli->t);
2255
2256 /* Propagate TYPE_PACKED and TYPE_REVERSE_STORAGE_ORDER to variants.
2257 With C++ templates, it is too early to do this when the attribute
2258 is being parsed. */
2259 for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
2260 variant = TYPE_NEXT_VARIANT (variant))
2261 {
2262 TYPE_PACKED (variant) = TYPE_PACKED (rli->t);
2263 TYPE_REVERSE_STORAGE_ORDER (variant)
2264 = TYPE_REVERSE_STORAGE_ORDER (rli->t);
2265 }
2266
2267 /* Lay out any static members. This is done now because their type
2268 may use the record's type. */
2269 while (!vec_safe_is_empty (rli->pending_statics))
2270 layout_decl (rli->pending_statics->pop (), 0);
2271
2272 /* Clean up. */
2273 if (free_p)
2274 {
2275 vec_free (rli->pending_statics);
2276 free (rli);
2277 }
2278 }
2279 \f
2280
2281 /* Finish processing a builtin RECORD_TYPE type TYPE. Its name is
2282 NAME; its fields are chained in reverse order on FIELDS.
2283
2284 If ALIGN_TYPE is non-null, it is given the same alignment as
2285 ALIGN_TYPE. */
2286
2287 void
2288 finish_builtin_struct (tree type, const char *name, tree fields,
2289 tree align_type)
2290 {
2291 tree tail, next;
2292
2293 for (tail = NULL_TREE; fields; tail = fields, fields = next)
2294 {
2295 DECL_FIELD_CONTEXT (fields) = type;
2296 next = DECL_CHAIN (fields);
2297 DECL_CHAIN (fields) = tail;
2298 }
2299 TYPE_FIELDS (type) = tail;
2300
2301 if (align_type)
2302 {
2303 SET_TYPE_ALIGN (type, TYPE_ALIGN (align_type));
2304 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
2305 SET_TYPE_WARN_IF_NOT_ALIGN (type,
2306 TYPE_WARN_IF_NOT_ALIGN (align_type));
2307 }
2308
2309 layout_type (type);
2310 #if 0 /* not yet, should get fixed properly later */
2311 TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
2312 #else
2313 TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
2314 TYPE_DECL, get_identifier (name), type);
2315 #endif
2316 TYPE_STUB_DECL (type) = TYPE_NAME (type);
2317 layout_decl (TYPE_NAME (type), 0);
2318 }
2319
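/* A hedged GCC-internal sketch (hypothetical type and field names) of
   how a front end might use finish_builtin_struct.  Because FIELDS is
   chained in reverse, passing the chain len -> ptr yields a record
   laid out as { ptr; len; }.  */

static tree
build_example_descriptor_type (void)
{
  tree t = make_node (RECORD_TYPE);
  tree f_ptr = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                           get_identifier ("ptr"), ptr_type_node);
  tree f_len = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                           get_identifier ("len"), sizetype);
  DECL_CHAIN (f_len) = f_ptr;   /* reverse order: last field first */
  finish_builtin_struct (t, "__example_descriptor", f_len, NULL_TREE);
  return t;
}
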
2320 /* Calculate the mode, size, and alignment for TYPE.
2321 For an array type, calculate the element separation as well.
2322 Record TYPE on the chain of permanent or temporary types
2323 so that dbxout will find out about it.
2324
2325 TYPE_SIZE of a type is nonzero if the type has been laid out already.
2326 layout_type does nothing on such a type.
2327
2328 If the type is incomplete, its TYPE_SIZE remains zero. */
2329
2330 void
2331 layout_type (tree type)
2332 {
2333 gcc_assert (type);
2334
2335 if (type == error_mark_node)
2336 return;
2337
2338 /* We don't want finalize_type_size to copy an alignment attribute to
2339 variants that don't have it. */
2340 type = TYPE_MAIN_VARIANT (type);
2341
2342 /* Do nothing if type has been laid out before. */
2343 if (TYPE_SIZE (type))
2344 return;
2345
2346 switch (TREE_CODE (type))
2347 {
2348 case LANG_TYPE:
2349 /* This kind of type is the responsibility
2350 of the language-specific code. */
2351 gcc_unreachable ();
2352
2353 case BOOLEAN_TYPE:
2354 case INTEGER_TYPE:
2355 case ENUMERAL_TYPE:
2356 {
2357 scalar_int_mode mode
2358 = smallest_int_mode_for_size (TYPE_PRECISION (type));
2359 SET_TYPE_MODE (type, mode);
2360 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2361 /* Don't set TYPE_PRECISION here, as it may be set by a bitfield. */
2362 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2363 break;
2364 }
2365
2366 case REAL_TYPE:
2367 {
2368 /* Allow the caller to choose the type mode, which is how decimal
2369 floats are distinguished from binary ones. */
2370 if (TYPE_MODE (type) == VOIDmode)
2371 SET_TYPE_MODE
2372 (type, float_mode_for_size (TYPE_PRECISION (type)).require ());
2373 scalar_float_mode mode = as_a <scalar_float_mode> (TYPE_MODE (type));
2374 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2375 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2376 break;
2377 }
2378
2379 case FIXED_POINT_TYPE:
2380 {
2381 /* TYPE_MODE (type) has been set already. */
2382 scalar_mode mode = SCALAR_TYPE_MODE (type);
2383 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2384 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2385 break;
2386 }
2387
2388 case COMPLEX_TYPE:
2389 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
2390 SET_TYPE_MODE (type,
2391 GET_MODE_COMPLEX_MODE (TYPE_MODE (TREE_TYPE (type))));
2392
2393 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2394 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2395 break;
2396
2397 case VECTOR_TYPE:
2398 {
2399 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type);
2400 tree innertype = TREE_TYPE (type);
2401
2402 /* Find an appropriate mode for the vector type. */
2403 if (TYPE_MODE (type) == VOIDmode)
2404 SET_TYPE_MODE (type,
2405 mode_for_vector (SCALAR_TYPE_MODE (innertype),
2406 nunits).else_blk ());
2407
2408 TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
2409 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
2410 /* Several boolean vector elements may fit in a single unit. */
2411 if (VECTOR_BOOLEAN_TYPE_P (type)
2412 && type->type_common.mode != BLKmode)
2413 TYPE_SIZE_UNIT (type)
2414 = size_int (GET_MODE_SIZE (type->type_common.mode));
2415 else
2416 TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
2417 TYPE_SIZE_UNIT (innertype),
2418 size_int (nunits));
2419 TYPE_SIZE (type) = int_const_binop
2420 (MULT_EXPR,
2421 bits_from_bytes (TYPE_SIZE_UNIT (type)),
2422 bitsize_int (BITS_PER_UNIT));
2423
2424 /* For vector types, we do not default to the mode's alignment.
2425 Instead, query a target hook, defaulting to natural alignment.
2426 This prevents ABI changes depending on whether or not native
2427 vector modes are supported. */
2428 SET_TYPE_ALIGN (type, targetm.vector_alignment (type));
2429
2430 /* However, if the underlying mode requires a bigger alignment than
2431 what the target hook provides, we cannot use the mode. For now,
2432 simply reject that case. */
2433 gcc_assert (TYPE_ALIGN (type)
2434 >= GET_MODE_ALIGNMENT (TYPE_MODE (type)));
2435 break;
2436 }
2437
2438 case VOID_TYPE:
2439 /* This is an incomplete type and so doesn't have a size. */
2440 SET_TYPE_ALIGN (type, 1);
2441 TYPE_USER_ALIGN (type) = 0;
2442 SET_TYPE_MODE (type, VOIDmode);
2443 break;
2444
2445 case OFFSET_TYPE:
2446 TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
2447 TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE_UNITS);
2448 /* A pointer might be MODE_PARTIAL_INT, but ptrdiff_t must be
2449 integral, which may be an __intN. */
2450 SET_TYPE_MODE (type, int_mode_for_size (POINTER_SIZE, 0).require ());
2451 TYPE_PRECISION (type) = POINTER_SIZE;
2452 break;
2453
2454 case FUNCTION_TYPE:
2455 case METHOD_TYPE:
2456 /* It's hard to see what the mode and size of a function ought to
2457 be, but we do know the alignment is FUNCTION_BOUNDARY, so
2458 make it consistent with that. */
2459 SET_TYPE_MODE (type,
2460 int_mode_for_size (FUNCTION_BOUNDARY, 0).else_blk ());
2461 TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
2462 TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
2463 break;
2464
2465 case POINTER_TYPE:
2466 case REFERENCE_TYPE:
2467 {
2468 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
2469 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2470 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2471 TYPE_UNSIGNED (type) = 1;
2472 TYPE_PRECISION (type) = GET_MODE_PRECISION (mode);
2473 }
2474 break;
2475
2476 case ARRAY_TYPE:
2477 {
2478 tree index = TYPE_DOMAIN (type);
2479 tree element = TREE_TYPE (type);
2480
2481 /* We need to know both bounds in order to compute the size. */
2482 if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
2483 && TYPE_SIZE (element))
2484 {
2485 tree ub = TYPE_MAX_VALUE (index);
2486 tree lb = TYPE_MIN_VALUE (index);
2487 tree element_size = TYPE_SIZE (element);
2488 tree length;
2489
2490 /* Make sure that an array of zero-sized elements is zero-sized
2491 regardless of its extent. */
2492 if (integer_zerop (element_size))
2493 length = size_zero_node;
2494
2495 /* The computation should happen in the original signedness so
2496 that (possible) negative values are handled appropriately
2497 when determining overflow. */
2498 else
2499 {
2500 /* ??? When it is obvious that the range is signed,
2501 represent it using ssizetype. */
2502 if (TREE_CODE (lb) == INTEGER_CST
2503 && TREE_CODE (ub) == INTEGER_CST
2504 && TYPE_UNSIGNED (TREE_TYPE (lb))
2505 && tree_int_cst_lt (ub, lb))
2506 {
2507 lb = wide_int_to_tree (ssizetype,
2508 offset_int::from (wi::to_wide (lb),
2509 SIGNED));
2510 ub = wide_int_to_tree (ssizetype,
2511 offset_int::from (wi::to_wide (ub),
2512 SIGNED));
2513 }
2514 length
2515 = fold_convert (sizetype,
2516 size_binop (PLUS_EXPR,
2517 build_int_cst (TREE_TYPE (lb), 1),
2518 size_binop (MINUS_EXPR, ub, lb)));
2519 }
2520
2521 /* ??? We have no way to distinguish a null-sized array from an
2522 array spanning the whole sizetype range, so we arbitrarily
2523 decide that [0, -1] is the only valid representation. */
2524 if (integer_zerop (length)
2525 && TREE_OVERFLOW (length)
2526 && integer_zerop (lb))
2527 length = size_zero_node;
2528
2529 TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
2530 bits_from_bytes (length));
2531
2532 /* If we know the size of the element, calculate the total size
2533 directly, rather than do some division thing below. This
2534 optimization helps Fortran assumed-size arrays (where the
2535 size of the array is determined at runtime) substantially. */
2536 if (TYPE_SIZE_UNIT (element))
2537 TYPE_SIZE_UNIT (type)
2538 = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
2539 }
2540
2541 /* Now round the alignment and size,
2542 using machine-dependent criteria if any. */
2543
2544 unsigned align = TYPE_ALIGN (element);
2545 if (TYPE_USER_ALIGN (type))
2546 align = MAX (align, TYPE_ALIGN (type));
2547 else
2548 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
2549 if (!TYPE_WARN_IF_NOT_ALIGN (type))
2550 SET_TYPE_WARN_IF_NOT_ALIGN (type,
2551 TYPE_WARN_IF_NOT_ALIGN (element));
2552 #ifdef ROUND_TYPE_ALIGN
2553 align = ROUND_TYPE_ALIGN (type, align, BITS_PER_UNIT);
2554 #else
2555 align = MAX (align, BITS_PER_UNIT);
2556 #endif
2557 SET_TYPE_ALIGN (type, align);
2558 SET_TYPE_MODE (type, BLKmode);
2559 if (TYPE_SIZE (type) != 0
2560 && ! targetm.member_type_forces_blk (type, VOIDmode)
2561 /* BLKmode elements force BLKmode aggregate;
2562 else extract/store fields may lose. */
2563 && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
2564 || TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
2565 {
2566 SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type),
2567 TYPE_SIZE (type)));
2568 if (TYPE_MODE (type) != BLKmode
2569 && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
2570 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
2571 {
2572 TYPE_NO_FORCE_BLK (type) = 1;
2573 SET_TYPE_MODE (type, BLKmode);
2574 }
2575 }
2576 if (AGGREGATE_TYPE_P (element))
2577 TYPE_TYPELESS_STORAGE (type) = TYPE_TYPELESS_STORAGE (element);
2578 /* When the element size is constant, check that it is at least as
2579 large as the element alignment. */
2580 if (TYPE_SIZE_UNIT (element)
2581 && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
2582 /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
2583 TYPE_ALIGN_UNIT. */
2584 && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
2585 && !integer_zerop (TYPE_SIZE_UNIT (element)))
2586 {
2587 if (compare_tree_int (TYPE_SIZE_UNIT (element),
2588 TYPE_ALIGN_UNIT (element)) < 0)
2589 error ("alignment of array elements is greater than "
2590 "element size");
2591 else if (TYPE_ALIGN_UNIT (element) > 1
2592 && (wi::zext (wi::to_wide (TYPE_SIZE_UNIT (element)),
2593 ffs_hwi (TYPE_ALIGN_UNIT (element)) - 1)
2594 != 0))
2595 error ("size of array element is not a multiple of its "
2596 "alignment");
2597 }
2598 break;
2599 }
2600
2601 case RECORD_TYPE:
2602 case UNION_TYPE:
2603 case QUAL_UNION_TYPE:
2604 {
2605 tree field;
2606 record_layout_info rli;
2607
2608 /* Initialize the layout information. */
2609 rli = start_record_layout (type);
2610
2611 /* If this is a QUAL_UNION_TYPE, we want to process the fields
2612 in the reverse order in building the COND_EXPR that denotes
2613 its size. We reverse them again later. */
2614 if (TREE_CODE (type) == QUAL_UNION_TYPE)
2615 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2616
2617 /* Place all the fields. */
2618 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
2619 place_field (rli, field);
2620
2621 if (TREE_CODE (type) == QUAL_UNION_TYPE)
2622 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2623
2624 /* Finish laying out the record. */
2625 finish_record_layout (rli, /*free_p=*/true);
2626 }
2627 break;
2628
2629 default:
2630 gcc_unreachable ();
2631 }
2632
2633 /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For
2634 records and unions, finish_record_layout already called this
2635 function. */
2636 if (!RECORD_OR_UNION_TYPE_P (type))
2637 finalize_type_size (type);
2638
2639 /* We should never see alias sets on incomplete aggregates, and we
2640 should not call layout_type on already-complete aggregates. */
2641 if (AGGREGATE_TYPE_P (type))
2642 gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
2643 }
2644
2645 /* Return the least alignment required for type TYPE. */
2646
2647 unsigned int
2648 min_align_of_type (tree type)
2649 {
2650 unsigned int align = TYPE_ALIGN (type);
2651 if (!TYPE_USER_ALIGN (type))
2652 {
2653 align = MIN (align, BIGGEST_ALIGNMENT);
2654 #ifdef BIGGEST_FIELD_ALIGNMENT
2655 align = MIN (align, BIGGEST_FIELD_ALIGNMENT);
2656 #endif
2657 unsigned int field_align = align;
2658 #ifdef ADJUST_FIELD_ALIGN
2659 field_align = ADJUST_FIELD_ALIGN (NULL_TREE, type, field_align);
2660 #endif
2661 align = MIN (align, field_align);
2662 }
2663 return align / BITS_PER_UNIT;
2664 }
2665 \f
2666 /* Create and return a type for signed integers of PRECISION bits. */
2667
2668 tree
2669 make_signed_type (int precision)
2670 {
2671 tree type = make_node (INTEGER_TYPE);
2672
2673 TYPE_PRECISION (type) = precision;
2674
2675 fixup_signed_type (type);
2676 return type;
2677 }
2678
2679 /* Create and return a type for unsigned integers of PRECISION bits. */
2680
2681 tree
2682 make_unsigned_type (int precision)
2683 {
2684 tree type = make_node (INTEGER_TYPE);
2685
2686 TYPE_PRECISION (type) = precision;
2687
2688 fixup_unsigned_type (type);
2689 return type;
2690 }
2691 \f
2692 /* Create and return a type for fract of PRECISION bits, UNSIGNEDP,
2693 and SATP. */
2694
2695 tree
2696 make_fract_type (int precision, int unsignedp, int satp)
2697 {
2698 tree type = make_node (FIXED_POINT_TYPE);
2699
2700 TYPE_PRECISION (type) = precision;
2701
2702 if (satp)
2703 TYPE_SATURATING (type) = 1;
2704
2705 /* Lay out the type: set its alignment, size, etc. */
2706 TYPE_UNSIGNED (type) = unsignedp;
2707 enum mode_class mclass = unsignedp ? MODE_UFRACT : MODE_FRACT;
2708 SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ());
2709 layout_type (type);
2710
2711 return type;
2712 }
2713
2714 /* Create and return a type for accum of PRECISION bits, UNSIGNEDP,
2715 and SATP. */
2716
2717 tree
2718 make_accum_type (int precision, int unsignedp, int satp)
2719 {
2720 tree type = make_node (FIXED_POINT_TYPE);
2721
2722 TYPE_PRECISION (type) = precision;
2723
2724 if (satp)
2725 TYPE_SATURATING (type) = 1;
2726
2727 /* Lay out the type: set its alignment, size, etc. */
2728 TYPE_UNSIGNED (type) = unsignedp;
2729 enum mode_class mclass = unsignedp ? MODE_UACCUM : MODE_ACCUM;
2730 SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ());
2731 layout_type (type);
2732
2733 return type;
2734 }
2735
2736 /* Initialize sizetypes so layout_type can use them. */
2737
2738 void
2739 initialize_sizetypes (void)
2740 {
2741 int precision, bprecision;
2742
2743 /* Get the precision of sizetype from the SIZETYPE target macro. */
2744 if (strcmp (SIZETYPE, "unsigned int") == 0)
2745 precision = INT_TYPE_SIZE;
2746 else if (strcmp (SIZETYPE, "long unsigned int") == 0)
2747 precision = LONG_TYPE_SIZE;
2748 else if (strcmp (SIZETYPE, "long long unsigned int") == 0)
2749 precision = LONG_LONG_TYPE_SIZE;
2750 else if (strcmp (SIZETYPE, "short unsigned int") == 0)
2751 precision = SHORT_TYPE_SIZE;
2752 else
2753 {
2754 int i;
2755
2756 precision = -1;
2757 for (i = 0; i < NUM_INT_N_ENTS; i++)
2758 if (int_n_enabled_p[i])
2759 {
2760 char name[50], altname[50];
2761 sprintf (name, "__int%d unsigned", int_n_data[i].bitsize);
2762 sprintf (altname, "__int%d__ unsigned", int_n_data[i].bitsize);
2763
2764 if (strcmp (name, SIZETYPE) == 0
2765 || strcmp (altname, SIZETYPE) == 0)
2766 {
2767 precision = int_n_data[i].bitsize;
2768 }
2769 }
2770 if (precision == -1)
2771 gcc_unreachable ();
2772 }
2773
2774 bprecision
2775 = MIN (precision + LOG2_BITS_PER_UNIT + 1, MAX_FIXED_MODE_SIZE);
2776 bprecision = GET_MODE_PRECISION (smallest_int_mode_for_size (bprecision));
2777 if (bprecision > HOST_BITS_PER_DOUBLE_INT)
2778 bprecision = HOST_BITS_PER_DOUBLE_INT;
2779
2780 /* Create stubs for sizetype and bitsizetype so we can create constants. */
2781 sizetype = make_node (INTEGER_TYPE);
2782 TYPE_NAME (sizetype) = get_identifier ("sizetype");
2783 TYPE_PRECISION (sizetype) = precision;
2784 TYPE_UNSIGNED (sizetype) = 1;
2785 bitsizetype = make_node (INTEGER_TYPE);
2786 TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype");
2787 TYPE_PRECISION (bitsizetype) = bprecision;
2788 TYPE_UNSIGNED (bitsizetype) = 1;
2789
2790 /* Now layout both types manually. */
2791 scalar_int_mode mode = smallest_int_mode_for_size (precision);
2792 SET_TYPE_MODE (sizetype, mode);
2793 SET_TYPE_ALIGN (sizetype, GET_MODE_ALIGNMENT (TYPE_MODE (sizetype)));
2794 TYPE_SIZE (sizetype) = bitsize_int (precision);
2795 TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (mode));
2796 set_min_and_max_values_for_integral_type (sizetype, precision, UNSIGNED);
2797
2798 mode = smallest_int_mode_for_size (bprecision);
2799 SET_TYPE_MODE (bitsizetype, mode);
2800 SET_TYPE_ALIGN (bitsizetype, GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype)));
2801 TYPE_SIZE (bitsizetype) = bitsize_int (bprecision);
2802 TYPE_SIZE_UNIT (bitsizetype) = size_int (GET_MODE_SIZE (mode));
2803 set_min_and_max_values_for_integral_type (bitsizetype, bprecision, UNSIGNED);
2804
2805 /* Create the signed variants of *sizetype. */
2806 ssizetype = make_signed_type (TYPE_PRECISION (sizetype));
2807 TYPE_NAME (ssizetype) = get_identifier ("ssizetype");
2808 sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype));
2809 TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype");
2810 }
2811 \f
2812 /* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE
2813 or BOOLEAN_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
2814 for TYPE, based on PRECISION and the signedness given by SGN.
2815 PRECISION need not correspond to a width supported
2816 natively by the hardware; for example, on a machine with 8-bit,
2817 16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
2818 61. */
2819
2820 void
2821 set_min_and_max_values_for_integral_type (tree type,
2822 int precision,
2823 signop sgn)
2824 {
2825 /* For bitfields with zero width we end up creating integer types
2826 with zero precision. Don't assign any minimum/maximum values
2827 to those types, they don't have any valid value. */
2828 if (precision < 1)
2829 return;
2830
2831 TYPE_MIN_VALUE (type)
2832 = wide_int_to_tree (type, wi::min_value (precision, sgn));
2833 TYPE_MAX_VALUE (type)
2834 = wide_int_to_tree (type, wi::max_value (precision, sgn));
2835 }
2836
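/* A hedged numeric sketch of the values assigned above: for
   PRECISION == 7,
     SIGNED:    TYPE_MIN_VALUE == -64, TYPE_MAX_VALUE == 63
     UNSIGNED:  TYPE_MIN_VALUE ==   0, TYPE_MAX_VALUE == 127
   i.e. -2^(P-1) .. 2^(P-1) - 1 for signed types and 0 .. 2^P - 1 for
   unsigned ones.  */
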
2837 /* Set the extreme values of TYPE based on its precision in bits,
2838 then lay it out. Used when make_signed_type won't do
2839 because the tree code is not INTEGER_TYPE. */
2840
2841 void
2842 fixup_signed_type (tree type)
2843 {
2844 int precision = TYPE_PRECISION (type);
2845
2846 set_min_and_max_values_for_integral_type (type, precision, SIGNED);
2847
2848 /* Lay out the type: set its alignment, size, etc. */
2849 layout_type (type);
2850 }
2851
2852 /* Set the extreme values of TYPE based on its precision in bits,
2853 then lay it out. This is used both in `make_unsigned_type'
2854 and for enumeral types. */
2855
2856 void
2857 fixup_unsigned_type (tree type)
2858 {
2859 int precision = TYPE_PRECISION (type);
2860
2861 TYPE_UNSIGNED (type) = 1;
2862
2863 set_min_and_max_values_for_integral_type (type, precision, UNSIGNED);
2864
2865 /* Lay out the type: set its alignment, size, etc. */
2866 layout_type (type);
2867 }
2868 \f
2869 /* Construct an iterator for a bitfield that spans BITSIZE bits,
2870 starting at BITPOS.
2871
2872 BITREGION_START is the bit position of the first bit in this
2873 sequence of bit fields. BITREGION_END is the last bit in this
2874 sequence. If these two fields are non-zero, we should restrict the
2875 memory access to that range. Otherwise, we are allowed to touch
2876 any adjacent non bit-fields.
2877
2878 ALIGN is the alignment of the underlying object in bits.
2879 VOLATILEP says whether the bitfield is volatile. */
2880
2881 bit_field_mode_iterator
2882 ::bit_field_mode_iterator (HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
2883 poly_int64 bitregion_start,
2884 poly_int64 bitregion_end,
2885 unsigned int align, bool volatilep)
2886 : m_mode (NARROWEST_INT_MODE), m_bitsize (bitsize),
2887 m_bitpos (bitpos), m_bitregion_start (bitregion_start),
2888 m_bitregion_end (bitregion_end), m_align (align),
2889 m_volatilep (volatilep), m_count (0)
2890 {
2891 if (known_eq (m_bitregion_end, 0))
2892 {
2893 /* We can assume that any aligned chunk of ALIGN bits that overlaps
2894 the bitfield is mapped and won't trap, provided that ALIGN isn't
2895 too large. The cap is the biggest required alignment for data,
2896 or at least the word size. And force one such chunk at least. */
2897 unsigned HOST_WIDE_INT units
2898 = MIN (align, MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD));
2899 if (bitsize <= 0)
2900 bitsize = 1;
2901 HOST_WIDE_INT end = bitpos + bitsize + units - 1;
2902 m_bitregion_end = end - end % units - 1;
2903 }
2904 }
2905
2906 /* Calls to this function return successively larger modes that can be used
2907 to represent the bitfield. Return true if another bitfield mode is
2908 available, storing it in *OUT_MODE if so. */
2909
2910 bool
2911 bit_field_mode_iterator::next_mode (scalar_int_mode *out_mode)
2912 {
2913 scalar_int_mode mode;
2914 for (; m_mode.exists (&mode); m_mode = GET_MODE_WIDER_MODE (mode))
2915 {
2916 unsigned int unit = GET_MODE_BITSIZE (mode);
2917
2918 /* Skip modes that don't have full precision. */
2919 if (unit != GET_MODE_PRECISION (mode))
2920 continue;
2921
2922 /* Stop if the mode is too wide to handle efficiently. */
2923 if (unit > MAX_FIXED_MODE_SIZE)
2924 break;
2925
2926 /* Don't deliver more than one multiword mode; the smallest one
2927 should be used. */
2928 if (m_count > 0 && unit > BITS_PER_WORD)
2929 break;
2930
2931 /* Skip modes that are too small. */
2932 unsigned HOST_WIDE_INT substart = (unsigned HOST_WIDE_INT) m_bitpos % unit;
2933 unsigned HOST_WIDE_INT subend = substart + m_bitsize;
2934 if (subend > unit)
2935 continue;
2936
2937 /* Stop if the mode goes outside the bitregion. */
2938 HOST_WIDE_INT start = m_bitpos - substart;
2939 if (maybe_ne (m_bitregion_start, 0)
2940 && maybe_lt (start, m_bitregion_start))
2941 break;
2942 HOST_WIDE_INT end = start + unit;
2943 if (maybe_gt (end, m_bitregion_end + 1))
2944 break;
2945
2946 /* Stop if the mode requires too much alignment. */
2947 if (GET_MODE_ALIGNMENT (mode) > m_align
2948 && targetm.slow_unaligned_access (mode, m_align))
2949 break;
2950
2951 *out_mode = mode;
2952 m_mode = GET_MODE_WIDER_MODE (mode);
2953 m_count++;
2954 return true;
2955 }
2956 return false;
2957 }
2958
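/* A hedged usage sketch (hypothetical values): walk the usable modes
   for an 8-bit field at bit 0 of a 32-bit-aligned, non-volatile
   object, from narrowest to widest.  */

static void
example_walk_bitfield_modes (void)
{
  bit_field_mode_iterator iter (/*bitsize=*/8, /*bitpos=*/0,
                                /*bitregion_start=*/0,
                                /*bitregion_end=*/0,
                                /*align=*/32, /*volatilep=*/false);
  scalar_int_mode mode;
  while (iter.next_mode (&mode))
    {
      /* Typically QImode first, then HImode and SImode.  */
      if (iter.prefer_smaller_modes ())
        break;   /* keep the narrowest acceptable mode */
    }
}
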
2959 /* Return true if smaller modes are generally preferred for this kind
2960 of bitfield. */
2961
2962 bool
2963 bit_field_mode_iterator::prefer_smaller_modes ()
2964 {
2965 return (m_volatilep
2966 ? targetm.narrow_volatile_bitfield ()
2967 : !SLOW_BYTE_ACCESS);
2968 }
2969
2970 /* Find the best machine mode to use when referencing a bit field of length
2971 BITSIZE bits starting at BITPOS.
2972
2973 BITREGION_START is the bit position of the first bit in this
2974 sequence of bit fields. BITREGION_END is the last bit in this
2975 sequence. If these two fields are non-zero, we should restrict the
2976 memory access to that range. Otherwise, we are allowed to touch
2977 any adjacent non bit-fields.
2978
2979 The chosen mode must have no more than LARGEST_MODE_BITSIZE bits.
2980 INT_MAX is a suitable value for LARGEST_MODE_BITSIZE if the caller
2981 doesn't want to apply a specific limit.
2982
2983 If no mode meets all these conditions, we return VOIDmode.
2984
2985 The underlying object is known to be aligned to a boundary of ALIGN bits.
2986
2987 If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
2988 smallest mode meeting these conditions.
2989
2990 If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
2991 largest mode (but a mode no wider than UNITS_PER_WORD) that meets
2992 all the conditions.
2993
2994 If VOLATILEP is true, the narrow_volatile_bitfield target hook is used to
2995 decide which of the above modes should be used. */
2996
2997 bool
2998 get_best_mode (int bitsize, int bitpos,
2999 poly_uint64 bitregion_start, poly_uint64 bitregion_end,
3000 unsigned int align,
3001 unsigned HOST_WIDE_INT largest_mode_bitsize, bool volatilep,
3002 scalar_int_mode *best_mode)
3003 {
3004 bit_field_mode_iterator iter (bitsize, bitpos, bitregion_start,
3005 bitregion_end, align, volatilep);
3006 scalar_int_mode mode;
3007 bool found = false;
3008 while (iter.next_mode (&mode)
3009 /* ??? For historical reasons, reject modes that would normally
3010 receive greater alignment, even if unaligned accesses are
3011 acceptable. This has both advantages and disadvantages.
3012 Removing this check means that something like:
3013
3014 struct s { unsigned int x; unsigned int y; };
3015 int f (struct s *s) { return s->x == 0 && s->y == 0; }
3016
3017 can be implemented using a single load and compare on
3018 64-bit machines that have no alignment restrictions.
3019 For example, on powerpc64-linux-gnu, we would generate:
3020
3021 ld 3,0(3)
3022 cntlzd 3,3
3023 srdi 3,3,6
3024 blr
3025
3026 rather than:
3027
3028 lwz 9,0(3)
3029 cmpwi 7,9,0
3030 bne 7,.L3
3031 lwz 3,4(3)
3032 cntlzw 3,3
3033 srwi 3,3,5
3034 extsw 3,3
3035 blr
3036 .p2align 4,,15
3037 .L3:
3038 li 3,0
3039 blr
3040
3041 However, accessing more than one field can make life harder
3042 for the gimple optimizers. For example, gcc.dg/vect/bb-slp-5.c
3043 has a series of unsigned short copies followed by a series of
3044 unsigned short comparisons. With this check, both the copies
3045 and comparisons remain 16-bit accesses and FRE is able
3046 to eliminate the latter. Without the check, the comparisons
3047 can be done using 2 64-bit operations, which FRE isn't able
3048 to handle in the same way.
3049
3050 Either way, it would probably be worth disabling this check
3051 during expand. One particular example where removing the
3052 check would help is the get_best_mode call in store_bit_field.
3053 If we are given a memory bitregion of 128 bits that is aligned
3054 to a 64-bit boundary, and the bitfield we want to modify is
3055 in the second half of the bitregion, this check causes
3056 store_bitfield to turn the memory into a 64-bit reference
3057 to the _first_ half of the region. We later use
3058 adjust_bitfield_address to get a reference to the correct half,
3059 but doing so looks to adjust_bitfield_address as though we are
3060 moving past the end of the original object, so it drops the
3061 associated MEM_EXPR and MEM_OFFSET. Removing the check
3062 causes store_bit_field to keep a 128-bit memory reference,
3063 so that the final bitfield reference still has a MEM_EXPR
3064 and MEM_OFFSET. */
3065 && GET_MODE_ALIGNMENT (mode) <= align
3066 && GET_MODE_BITSIZE (mode) <= largest_mode_bitsize)
3067 {
3068 *best_mode = mode;
3069 found = true;
3070 if (iter.prefer_smaller_modes ())
3071 break;
3072 }
3073
3074 return found;
3075 }
3076
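/* A hedged usage sketch (hypothetical values): choose a mode for an
   8-bit field at bit 16 of a 32-bit-aligned, non-volatile object,
   capping the access at one word.  */

static void
example_get_best_mode (void)
{
  scalar_int_mode best;
  if (get_best_mode (/*bitsize=*/8, /*bitpos=*/16,
                     /*bitregion_start=*/0, /*bitregion_end=*/0,
                     /*align=*/32, /*largest_mode_bitsize=*/BITS_PER_WORD,
                     /*volatilep=*/false, &best))
    {
      /* BEST now holds e.g. QImode when smaller modes are preferred.  */
    }
}
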
3077 /* Gets minimal and maximal values for MODE (signed or unsigned depending on
3078 SIGN). The returned constants are made to be usable in TARGET_MODE. */
3079
3080 void
3081 get_mode_bounds (scalar_int_mode mode, int sign,
3082 scalar_int_mode target_mode,
3083 rtx *mmin, rtx *mmax)
3084 {
3085 unsigned size = GET_MODE_PRECISION (mode);
3086 unsigned HOST_WIDE_INT min_val, max_val;
3087
3088 gcc_assert (size <= HOST_BITS_PER_WIDE_INT);
3089
3090 /* Special case BImode, which has values 0 and STORE_FLAG_VALUE. */
3091 if (mode == BImode)
3092 {
3093 if (STORE_FLAG_VALUE < 0)
3094 {
3095 min_val = STORE_FLAG_VALUE;
3096 max_val = 0;
3097 }
3098 else
3099 {
3100 min_val = 0;
3101 max_val = STORE_FLAG_VALUE;
3102 }
3103 }
3104 else if (sign)
3105 {
3106 min_val = -(HOST_WIDE_INT_1U << (size - 1));
3107 max_val = (HOST_WIDE_INT_1U << (size - 1)) - 1;
3108 }
3109 else
3110 {
3111 min_val = 0;
3112 max_val = (HOST_WIDE_INT_1U << (size - 1) << 1) - 1;
3113 }
3114
3115 *mmin = gen_int_mode (min_val, target_mode);
3116 *mmax = gen_int_mode (max_val, target_mode);
3117 }
3118
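/* A hedged usage sketch: the signed bounds of QImode, widened for use
   in SImode arithmetic.  */

static void
example_mode_bounds (void)
{
  rtx mmin, mmax;
  get_mode_bounds (QImode, /*sign=*/1, SImode, &mmin, &mmax);
  /* mmin is -128 and mmax is 127, as SImode-compatible constants.  */
}
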
3119 #include "gt-stor-layout.h"