1 /* C-compiler utilities for types and variables storage layout
2 Copyright (C) 1987-2019 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "target.h"
25 #include "function.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "memmodel.h"
29 #include "tm_p.h"
30 #include "stringpool.h"
31 #include "regs.h"
32 #include "emit-rtl.h"
33 #include "cgraph.h"
34 #include "diagnostic-core.h"
35 #include "fold-const.h"
36 #include "stor-layout.h"
37 #include "varasm.h"
38 #include "print-tree.h"
39 #include "langhooks.h"
40 #include "tree-inline.h"
41 #include "dumpfile.h"
42 #include "gimplify.h"
43 #include "attribs.h"
44 #include "debug.h"
45 #include "calls.h"
46
47 /* Data type for the expressions representing sizes of data types.
48 It is the first integer type laid out. */
49 tree sizetype_tab[(int) stk_type_kind_last];
50
51 /* If nonzero, this is an upper limit on alignment of structure fields.
52 The value is measured in bits. */
53 unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;
54
55 static tree self_referential_size (tree);
56 static void finalize_record_size (record_layout_info);
57 static void finalize_type_size (tree);
58 static void place_union_field (record_layout_info, tree);
59 static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
60 HOST_WIDE_INT, tree);
61 extern void debug_rli (record_layout_info);
62 \f
63 /* Given a size SIZE that may not be a constant, return a SAVE_EXPR
64 to serve as the actual size-expression for a type or decl. */
65
66 tree
67 variable_size (tree size)
68 {
69 /* Obviously. */
70 if (TREE_CONSTANT (size))
71 return size;
72
73 /* If the size is self-referential, we can't make a SAVE_EXPR (see
74 save_expr for the rationale). But we can do something else. */
75 if (CONTAINS_PLACEHOLDER_P (size))
76 return self_referential_size (size);
77
78 /* If we are in the global binding level, we can't make a SAVE_EXPR
79 since it may end up being shared across functions, so it is up
80 to the front-end to deal with this case. */
81 if (lang_hooks.decls.global_bindings_p ())
82 return size;
83
84 return save_expr (size);
85 }
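/* Illustrative example (not from the original source, assuming 8-bit
   bytes): for a C99 VLA such as "char buf[n];" declared inside a
   function, the front end builds a nonconstant DECL_SIZE of "n * 8"
   bits.  Passing that expression through variable_size wraps it in a
   SAVE_EXPR, so the multiplication is evaluated once and the result
   is reused everywhere the size is needed.  */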
86
87 /* An array of functions used for self-referential size computation. */
88 static GTY(()) vec<tree, va_gc> *size_functions;
89
90 /* Return true if T is a self-referential component reference. */
91
92 static bool
93 self_referential_component_ref_p (tree t)
94 {
95 if (TREE_CODE (t) != COMPONENT_REF)
96 return false;
97
98 while (REFERENCE_CLASS_P (t))
99 t = TREE_OPERAND (t, 0);
100
101 return (TREE_CODE (t) == PLACEHOLDER_EXPR);
102 }
103
104 /* Similar to copy_tree_r but do not copy component references involving
105 PLACEHOLDER_EXPRs. These nodes are spotted in find_placeholder_in_expr
106 and substituted in substitute_in_expr. */
107
108 static tree
109 copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
110 {
111 enum tree_code code = TREE_CODE (*tp);
112
113 /* Stop at types, decls, constants like copy_tree_r. */
114 if (TREE_CODE_CLASS (code) == tcc_type
115 || TREE_CODE_CLASS (code) == tcc_declaration
116 || TREE_CODE_CLASS (code) == tcc_constant)
117 {
118 *walk_subtrees = 0;
119 return NULL_TREE;
120 }
121
122 /* This is the pattern built in ada/make_aligning_type. */
123 else if (code == ADDR_EXPR
124 && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
125 {
126 *walk_subtrees = 0;
127 return NULL_TREE;
128 }
129
130 /* Default case: the component reference. */
131 else if (self_referential_component_ref_p (*tp))
132 {
133 *walk_subtrees = 0;
134 return NULL_TREE;
135 }
136
137       /* We're not supposed to have SAVE_EXPRs in self-referential size trees
138 because we wouldn't properly control when they are evaluated.
139 However, not creating superfluous SAVE_EXPRs requires accurate
140 tracking of readonly-ness all the way down to here, which we
141 cannot always guarantee in practice. So punt in this case. */
142 else if (code == SAVE_EXPR)
143 return error_mark_node;
144
145 else if (code == STATEMENT_LIST)
146 gcc_unreachable ();
147
148 return copy_tree_r (tp, walk_subtrees, data);
149 }
150
151 /* Given a SIZE expression that is self-referential, return an equivalent
152 expression to serve as the actual size expression for a type. */
153
154 static tree
155 self_referential_size (tree size)
156 {
157 static unsigned HOST_WIDE_INT fnno = 0;
158 vec<tree> self_refs = vNULL;
159 tree param_type_list = NULL, param_decl_list = NULL;
160 tree t, ref, return_type, fntype, fnname, fndecl;
161 unsigned int i;
162 char buf[128];
163 vec<tree, va_gc> *args = NULL;
164
165 /* Do not factor out simple operations. */
166 t = skip_simple_constant_arithmetic (size);
167 if (TREE_CODE (t) == CALL_EXPR || self_referential_component_ref_p (t))
168 return size;
169
170 /* Collect the list of self-references in the expression. */
171 find_placeholder_in_expr (size, &self_refs);
172 gcc_assert (self_refs.length () > 0);
173
174 /* Obtain a private copy of the expression. */
175 t = size;
176 if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
177 return size;
178 size = t;
179
180 /* Build the parameter and argument lists in parallel; also
181 substitute the former for the latter in the expression. */
182 vec_alloc (args, self_refs.length ());
183 FOR_EACH_VEC_ELT (self_refs, i, ref)
184 {
185 tree subst, param_name, param_type, param_decl;
186
187 if (DECL_P (ref))
188 {
189 /* We shouldn't have true variables here. */
190 gcc_assert (TREE_READONLY (ref));
191 subst = ref;
192 }
193 /* This is the pattern built in ada/make_aligning_type. */
194 else if (TREE_CODE (ref) == ADDR_EXPR)
195 subst = ref;
196 /* Default case: the component reference. */
197 else
198 subst = TREE_OPERAND (ref, 1);
199
200 sprintf (buf, "p%d", i);
201 param_name = get_identifier (buf);
202 param_type = TREE_TYPE (ref);
203 param_decl
204 = build_decl (input_location, PARM_DECL, param_name, param_type);
205 DECL_ARG_TYPE (param_decl) = param_type;
206 DECL_ARTIFICIAL (param_decl) = 1;
207 TREE_READONLY (param_decl) = 1;
208
209 size = substitute_in_expr (size, subst, param_decl);
210
211 param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
212 param_decl_list = chainon (param_decl, param_decl_list);
213 args->quick_push (ref);
214 }
215
216 self_refs.release ();
217
218 /* Append 'void' to indicate that the number of parameters is fixed. */
219 param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);
220
221 /* The 3 lists have been created in reverse order. */
222 param_type_list = nreverse (param_type_list);
223 param_decl_list = nreverse (param_decl_list);
224
225 /* Build the function type. */
226 return_type = TREE_TYPE (size);
227 fntype = build_function_type (return_type, param_type_list);
228
229 /* Build the function declaration. */
230 sprintf (buf, "SZ" HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
231 fnname = get_file_function_name (buf);
232 fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
233 for (t = param_decl_list; t; t = DECL_CHAIN (t))
234 DECL_CONTEXT (t) = fndecl;
235 DECL_ARGUMENTS (fndecl) = param_decl_list;
236 DECL_RESULT (fndecl)
237 = build_decl (input_location, RESULT_DECL, 0, return_type);
238 DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;
239
240 /* The function has been created by the compiler and we don't
241 want to emit debug info for it. */
242 DECL_ARTIFICIAL (fndecl) = 1;
243 DECL_IGNORED_P (fndecl) = 1;
244
245 /* It is supposed to be "const" and never throw. */
246 TREE_READONLY (fndecl) = 1;
247 TREE_NOTHROW (fndecl) = 1;
248
249 /* We want it to be inlined when this is deemed profitable, as
250 well as discarded if every call has been integrated. */
251 DECL_DECLARED_INLINE_P (fndecl) = 1;
252
253 /* It is made up of a unique return statement. */
254 DECL_INITIAL (fndecl) = make_node (BLOCK);
255 BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
256 t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
257 DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
258 TREE_STATIC (fndecl) = 1;
259
260 /* Put it onto the list of size functions. */
261 vec_safe_push (size_functions, fndecl);
262
263 /* Replace the original expression with a call to the size function. */
264 return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
265 }
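/* Sketch of the transformation above (illustrative only): for an
   Ada-like record whose size depends on a discriminant D, the size
   tree might be "(PLACEHOLDER).D * 8 + 32".  The code builds a small
   artificial function, conceptually

       <size type> SZn (p0) { return p0 * 8 + 32; }

   with one read-only parameter per self-reference (named p0, p1, ...
   by the sprintf above), pushes it onto size_functions, and replaces
   the original expression with a call whose argument is the original
   reference "(PLACEHOLDER).D", to be substituted later.  */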
266
267 /* Take, queue and compile all the size functions. It is essential that
268 the size functions be gimplified at the very end of the compilation
269 in order to guarantee transparent handling of self-referential sizes.
270 Otherwise the GENERIC inliner would not be able to inline them back
271 at each of their call sites, thus creating artificial non-constant
272 size expressions which would trigger nasty problems later on. */
273
274 void
275 finalize_size_functions (void)
276 {
277 unsigned int i;
278 tree fndecl;
279
280 for (i = 0; size_functions && size_functions->iterate (i, &fndecl); i++)
281 {
282 allocate_struct_function (fndecl, false);
283 set_cfun (NULL);
284 dump_function (TDI_original, fndecl);
285
286 /* As these functions are used to describe the layout of variable-length
287 structures, debug info generation needs their implementation. */
288 debug_hooks->size_function (fndecl);
289 gimplify_function_tree (fndecl);
290 cgraph_node::finalize_function (fndecl, false);
291 }
292
293 vec_free (size_functions);
294 }
295 \f
296 /* Return a machine mode of class MCLASS with SIZE bits of precision,
297    if one exists.  The mode may have padding bits as well as the SIZE
298 value bits. If LIMIT is nonzero, disregard modes wider than
299 MAX_FIXED_MODE_SIZE. */
300
301 opt_machine_mode
302 mode_for_size (poly_uint64 size, enum mode_class mclass, int limit)
303 {
304 machine_mode mode;
305 int i;
306
307 if (limit && maybe_gt (size, (unsigned int) MAX_FIXED_MODE_SIZE))
308 return opt_machine_mode ();
309
310 /* Get the first mode which has this size, in the specified class. */
311 FOR_EACH_MODE_IN_CLASS (mode, mclass)
312 if (known_eq (GET_MODE_PRECISION (mode), size))
313 return mode;
314
315 if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
316 for (i = 0; i < NUM_INT_N_ENTS; i ++)
317 if (known_eq (int_n_data[i].bitsize, size)
318 && int_n_enabled_p[i])
319 return int_n_data[i].m;
320
321 return opt_machine_mode ();
322 }
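/* Worked example (target-dependent, illustrative): on a typical target
   with 8-bit QImode, 16-bit HImode, 32-bit SImode and 64-bit DImode,
   mode_for_size (32, MODE_INT, 0) yields SImode, whereas
   mode_for_size (24, MODE_INT, 0) finds no integer mode with exactly
   24 bits of precision and returns an empty opt_machine_mode.  */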
323
324 /* Similar, except passed a tree node. */
325
326 opt_machine_mode
327 mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
328 {
329 unsigned HOST_WIDE_INT uhwi;
330 unsigned int ui;
331
332 if (!tree_fits_uhwi_p (size))
333 return opt_machine_mode ();
334 uhwi = tree_to_uhwi (size);
335 ui = uhwi;
336 if (uhwi != ui)
337 return opt_machine_mode ();
338 return mode_for_size (ui, mclass, limit);
339 }
340
341 /* Return the narrowest mode of class MCLASS that contains at least
342 SIZE bits. Abort if no such mode exists. */
343
344 machine_mode
345 smallest_mode_for_size (poly_uint64 size, enum mode_class mclass)
346 {
347 machine_mode mode = VOIDmode;
348 int i;
349
350 /* Get the first mode which has at least this size, in the
351 specified class. */
352 FOR_EACH_MODE_IN_CLASS (mode, mclass)
353 if (known_ge (GET_MODE_PRECISION (mode), size))
354 break;
355
356 gcc_assert (mode != VOIDmode);
357
358 if (mclass == MODE_INT || mclass == MODE_PARTIAL_INT)
359 for (i = 0; i < NUM_INT_N_ENTS; i ++)
360 if (known_ge (int_n_data[i].bitsize, size)
361 && known_lt (int_n_data[i].bitsize, GET_MODE_PRECISION (mode))
362 && int_n_enabled_p[i])
363 mode = int_n_data[i].m;
364
365 return mode;
366 }
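/* Worked example (illustrative): with the usual 8/16/32/64-bit integer
   modes, smallest_mode_for_size (17, MODE_INT) returns SImode, the
   narrowest mode with at least 17 bits of precision, since HImode
   (16 bits) is too narrow.  */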
367
368 /* Return an integer mode of exactly the same size as MODE, if one exists. */
369
370 opt_scalar_int_mode
371 int_mode_for_mode (machine_mode mode)
372 {
373 switch (GET_MODE_CLASS (mode))
374 {
375 case MODE_INT:
376 case MODE_PARTIAL_INT:
377 return as_a <scalar_int_mode> (mode);
378
379 case MODE_COMPLEX_INT:
380 case MODE_COMPLEX_FLOAT:
381 case MODE_FLOAT:
382 case MODE_DECIMAL_FLOAT:
383 case MODE_FRACT:
384 case MODE_ACCUM:
385 case MODE_UFRACT:
386 case MODE_UACCUM:
387 case MODE_VECTOR_BOOL:
388 case MODE_VECTOR_INT:
389 case MODE_VECTOR_FLOAT:
390 case MODE_VECTOR_FRACT:
391 case MODE_VECTOR_ACCUM:
392 case MODE_VECTOR_UFRACT:
393 case MODE_VECTOR_UACCUM:
394 return int_mode_for_size (GET_MODE_BITSIZE (mode), 0);
395
396 case MODE_RANDOM:
397 if (mode == BLKmode)
398 return opt_scalar_int_mode ();
399
400 /* fall through */
401
402 case MODE_CC:
403 default:
404 gcc_unreachable ();
405 }
406 }
407
408 /* Find a mode that can be used for efficient bitwise operations on MODE,
409 if one exists. */
410
411 opt_machine_mode
412 bitwise_mode_for_mode (machine_mode mode)
413 {
414 /* Quick exit if we already have a suitable mode. */
415 scalar_int_mode int_mode;
416 if (is_a <scalar_int_mode> (mode, &int_mode)
417 && GET_MODE_BITSIZE (int_mode) <= MAX_FIXED_MODE_SIZE)
418 return int_mode;
419
420 /* Reuse the sanity checks from int_mode_for_mode. */
421 gcc_checking_assert ((int_mode_for_mode (mode), true));
422
423 poly_int64 bitsize = GET_MODE_BITSIZE (mode);
424
425 /* Try to replace complex modes with complex modes. In general we
426 expect both components to be processed independently, so we only
427 care whether there is a register for the inner mode. */
428 if (COMPLEX_MODE_P (mode))
429 {
430 machine_mode trial = mode;
431 if ((GET_MODE_CLASS (trial) == MODE_COMPLEX_INT
432 || mode_for_size (bitsize, MODE_COMPLEX_INT, false).exists (&trial))
433 && have_regs_of_mode[GET_MODE_INNER (trial)])
434 return trial;
435 }
436
437 /* Try to replace vector modes with vector modes. Also try using vector
438 modes if an integer mode would be too big. */
439 if (VECTOR_MODE_P (mode)
440 || maybe_gt (bitsize, MAX_FIXED_MODE_SIZE))
441 {
442 machine_mode trial = mode;
443 if ((GET_MODE_CLASS (trial) == MODE_VECTOR_INT
444 || mode_for_size (bitsize, MODE_VECTOR_INT, 0).exists (&trial))
445 && have_regs_of_mode[trial]
446 && targetm.vector_mode_supported_p (trial))
447 return trial;
448 }
449
450 /* Otherwise fall back on integers while honoring MAX_FIXED_MODE_SIZE. */
451 return mode_for_size (bitsize, MODE_INT, true);
452 }
453
454 /* Find a type that can be used for efficient bitwise operations on MODE.
455 Return null if no such mode exists. */
456
457 tree
458 bitwise_type_for_mode (machine_mode mode)
459 {
460 if (!bitwise_mode_for_mode (mode).exists (&mode))
461 return NULL_TREE;
462
463 unsigned int inner_size = GET_MODE_UNIT_BITSIZE (mode);
464 tree inner_type = build_nonstandard_integer_type (inner_size, true);
465
466 if (VECTOR_MODE_P (mode))
467 return build_vector_type_for_mode (inner_type, mode);
468
469 if (COMPLEX_MODE_P (mode))
470 return build_complex_type (inner_type);
471
472 gcc_checking_assert (GET_MODE_INNER (mode) == mode);
473 return inner_type;
474 }
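/* Illustrative example (assuming the target supports the vector mode):
   if MODE is V4SImode, a vector of four 32-bit units,
   GET_MODE_UNIT_BITSIZE is 32, so the inner type is a 32-bit unsigned
   integer type and the result is a four-element vector type with that
   mode.  For a scalar mode such as SImode the result is simply the
   32-bit unsigned integer type itself.  */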
475
476 /* Find a mode that is suitable for representing a vector with NUNITS
477 elements of mode INNERMODE, if one exists. The returned mode can be
478 either an integer mode or a vector mode. */
479
480 opt_machine_mode
481 mode_for_vector (scalar_mode innermode, poly_uint64 nunits)
482 {
483 machine_mode mode;
484
485 /* First, look for a supported vector type. */
486 if (SCALAR_FLOAT_MODE_P (innermode))
487 mode = MIN_MODE_VECTOR_FLOAT;
488 else if (SCALAR_FRACT_MODE_P (innermode))
489 mode = MIN_MODE_VECTOR_FRACT;
490 else if (SCALAR_UFRACT_MODE_P (innermode))
491 mode = MIN_MODE_VECTOR_UFRACT;
492 else if (SCALAR_ACCUM_MODE_P (innermode))
493 mode = MIN_MODE_VECTOR_ACCUM;
494 else if (SCALAR_UACCUM_MODE_P (innermode))
495 mode = MIN_MODE_VECTOR_UACCUM;
496 else
497 mode = MIN_MODE_VECTOR_INT;
498
499 /* Do not check vector_mode_supported_p here. We'll do that
500 later in vector_type_mode. */
501 FOR_EACH_MODE_FROM (mode, mode)
502 if (known_eq (GET_MODE_NUNITS (mode), nunits)
503 && GET_MODE_INNER (mode) == innermode)
504 return mode;
505
506 /* For integers, try mapping it to a same-sized scalar mode. */
507 if (GET_MODE_CLASS (innermode) == MODE_INT)
508 {
509 poly_uint64 nbits = nunits * GET_MODE_BITSIZE (innermode);
510 if (int_mode_for_size (nbits, 0).exists (&mode)
511 && have_regs_of_mode[mode])
512 return mode;
513 }
514
515 return opt_machine_mode ();
516 }
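/* Illustrative example: mode_for_vector (SImode, 4) walks the
   MODE_VECTOR_INT modes looking for one with four SImode elements,
   typically V4SImode.  If no such vector mode exists, the integer
   fallback tries a scalar mode of 4 * 32 = 128 bits, e.g. TImode,
   provided the target has registers of that mode.  */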
517
518 /* Return the mode for a vector that has NUNITS integer elements of
519 INT_BITS bits each, if such a mode exists. The mode can be either
520 an integer mode or a vector mode. */
521
522 opt_machine_mode
523 mode_for_int_vector (unsigned int int_bits, poly_uint64 nunits)
524 {
525 scalar_int_mode int_mode;
526 machine_mode vec_mode;
527 if (int_mode_for_size (int_bits, 0).exists (&int_mode)
528 && mode_for_vector (int_mode, nunits).exists (&vec_mode))
529 return vec_mode;
530 return opt_machine_mode ();
531 }
532
533 /* Return the alignment of MODE. This will be bounded by 1 and
534 BIGGEST_ALIGNMENT. */
535
536 unsigned int
537 get_mode_alignment (machine_mode mode)
538 {
539 return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
540 }
541
542 /* Return the natural mode of an array, given that it is SIZE bytes in
543 total and has elements of type ELEM_TYPE. */
544
545 static machine_mode
546 mode_for_array (tree elem_type, tree size)
547 {
548 tree elem_size;
549 poly_uint64 int_size, int_elem_size;
550 unsigned HOST_WIDE_INT num_elems;
551 bool limit_p;
552
553 /* One-element arrays get the component type's mode. */
554 elem_size = TYPE_SIZE (elem_type);
555 if (simple_cst_equal (size, elem_size))
556 return TYPE_MODE (elem_type);
557
558 limit_p = true;
559 if (poly_int_tree_p (size, &int_size)
560 && poly_int_tree_p (elem_size, &int_elem_size)
561 && maybe_ne (int_elem_size, 0U)
562 && constant_multiple_p (int_size, int_elem_size, &num_elems))
563 {
564 machine_mode elem_mode = TYPE_MODE (elem_type);
565 machine_mode mode;
566 if (targetm.array_mode (elem_mode, num_elems).exists (&mode))
567 return mode;
568 if (targetm.array_mode_supported_p (elem_mode, num_elems))
569 limit_p = false;
570 }
571 return mode_for_size_tree (size, MODE_INT, limit_p).else_blk ();
572 }
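/* Illustrative example: for "char buf[4]" with 8-bit elements, SIZE is
   32 bits, which differs from the element size, so the code above
   falls through to mode_for_size_tree (32, MODE_INT, true); on most
   targets that yields SImode, letting the whole array live in one
   register.  A 3-element array of 3-byte records (72 bits), by
   contrast, would usually end up as BLKmode.  */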
573 \f
574 /* Subroutine of layout_decl: Force alignment required for the data type.
575 But if the decl itself wants greater alignment, don't override that. */
576
577 static inline void
578 do_type_align (tree type, tree decl)
579 {
580 if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
581 {
582 SET_DECL_ALIGN (decl, TYPE_ALIGN (type));
583 if (TREE_CODE (decl) == FIELD_DECL)
584 DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
585 }
586 if (TYPE_WARN_IF_NOT_ALIGN (type) > DECL_WARN_IF_NOT_ALIGN (decl))
587 SET_DECL_WARN_IF_NOT_ALIGN (decl, TYPE_WARN_IF_NOT_ALIGN (type));
588 }
589
590 /* Set the size, mode and alignment of a ..._DECL node.
591 TYPE_DECL does need this for C++.
592 Note that LABEL_DECL and CONST_DECL nodes do not need this,
593 and FUNCTION_DECL nodes have them set up in a special (and simple) way.
594 Don't call layout_decl for them.
595
596 KNOWN_ALIGN is the amount of alignment we can assume this
597 decl has with no special effort. It is relevant only for FIELD_DECLs
598 and depends on the previous fields.
599 All that matters about KNOWN_ALIGN is which powers of 2 divide it.
600 If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
601 the record will be aligned to suit. */
602
603 void
604 layout_decl (tree decl, unsigned int known_align)
605 {
606 tree type = TREE_TYPE (decl);
607 enum tree_code code = TREE_CODE (decl);
608 rtx rtl = NULL_RTX;
609 location_t loc = DECL_SOURCE_LOCATION (decl);
610
611 if (code == CONST_DECL)
612 return;
613
614 gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
615 || code == TYPE_DECL || code == FIELD_DECL);
616
617 rtl = DECL_RTL_IF_SET (decl);
618
619 if (type == error_mark_node)
620 type = void_type_node;
621
622 /* Usually the size and mode come from the data type without change,
623 however, the front-end may set the explicit width of the field, so its
624 size may not be the same as the size of its type. This happens with
625 bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
626 also happens with other fields. For example, the C++ front-end creates
627 zero-sized fields corresponding to empty base classes, and depends on
628 layout_type setting DECL_FIELD_BITPOS correctly for the field. Set the
629 size in bytes from the size in bits. If we have already set the mode,
630 don't set it again since we can be called twice for FIELD_DECLs. */
631
632 DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
633 if (DECL_MODE (decl) == VOIDmode)
634 SET_DECL_MODE (decl, TYPE_MODE (type));
635
636 if (DECL_SIZE (decl) == 0)
637 {
638 DECL_SIZE (decl) = TYPE_SIZE (type);
639 DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
640 }
641 else if (DECL_SIZE_UNIT (decl) == 0)
642 DECL_SIZE_UNIT (decl)
643 = fold_convert_loc (loc, sizetype,
644 size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
645 bitsize_unit_node));
646
647 if (code != FIELD_DECL)
648 /* For non-fields, update the alignment from the type. */
649 do_type_align (type, decl);
650 else
651 /* For fields, it's a bit more complicated... */
652 {
653 bool old_user_align = DECL_USER_ALIGN (decl);
654 bool zero_bitfield = false;
655 bool packed_p = DECL_PACKED (decl);
656 unsigned int mfa;
657
658 if (DECL_BIT_FIELD (decl))
659 {
660 DECL_BIT_FIELD_TYPE (decl) = type;
661
662 /* A zero-length bit-field affects the alignment of the next
663 field. In essence such bit-fields are not influenced by
664 any packing due to #pragma pack or attribute packed. */
665 if (integer_zerop (DECL_SIZE (decl))
666 && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
667 {
668 zero_bitfield = true;
669 packed_p = false;
670 if (PCC_BITFIELD_TYPE_MATTERS)
671 do_type_align (type, decl);
672 else
673 {
674 #ifdef EMPTY_FIELD_BOUNDARY
675 if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
676 {
677 SET_DECL_ALIGN (decl, EMPTY_FIELD_BOUNDARY);
678 DECL_USER_ALIGN (decl) = 0;
679 }
680 #endif
681 }
682 }
683
684 /* See if we can use an ordinary integer mode for a bit-field.
685 Conditions are: a fixed size that is correct for another mode,
686 occupying a complete byte or bytes on proper boundary. */
687 if (TYPE_SIZE (type) != 0
688 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
689 && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT)
690 {
691 machine_mode xmode;
692 if (mode_for_size_tree (DECL_SIZE (decl),
693 MODE_INT, 1).exists (&xmode))
694 {
695 unsigned int xalign = GET_MODE_ALIGNMENT (xmode);
696 if (!(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
697 && (known_align == 0 || known_align >= xalign))
698 {
699 SET_DECL_ALIGN (decl, MAX (xalign, DECL_ALIGN (decl)));
700 SET_DECL_MODE (decl, xmode);
701 DECL_BIT_FIELD (decl) = 0;
702 }
703 }
704 }
705
706 /* Turn off DECL_BIT_FIELD if we won't need it set. */
707 if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
708 && known_align >= TYPE_ALIGN (type)
709 && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
710 DECL_BIT_FIELD (decl) = 0;
711 }
712 else if (packed_p && DECL_USER_ALIGN (decl))
713 /* Don't touch DECL_ALIGN. For other packed fields, go ahead and
714 round up; we'll reduce it again below. We want packing to
715 supersede USER_ALIGN inherited from the type, but defer to
716 alignment explicitly specified on the field decl. */;
717 else
718 do_type_align (type, decl);
719
720 /* If the field is packed and not explicitly aligned, give it the
721 minimum alignment. Note that do_type_align may set
722 DECL_USER_ALIGN, so we need to check old_user_align instead. */
723 if (packed_p
724 && !old_user_align)
725 SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), BITS_PER_UNIT));
726
727 if (! packed_p && ! DECL_USER_ALIGN (decl))
728 {
729 /* Some targets (i.e. i386, VMS) limit struct field alignment
730 to a lower boundary than alignment of variables unless
731 it was overridden by attribute aligned. */
732 #ifdef BIGGEST_FIELD_ALIGNMENT
733 SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl),
734 (unsigned) BIGGEST_FIELD_ALIGNMENT));
735 #endif
736 #ifdef ADJUST_FIELD_ALIGN
737 SET_DECL_ALIGN (decl, ADJUST_FIELD_ALIGN (decl, TREE_TYPE (decl),
738 DECL_ALIGN (decl)));
739 #endif
740 }
741
742 if (zero_bitfield)
743 mfa = initial_max_fld_align * BITS_PER_UNIT;
744 else
745 mfa = maximum_field_alignment;
746 /* Should this be controlled by DECL_USER_ALIGN, too? */
747 if (mfa != 0)
748 SET_DECL_ALIGN (decl, MIN (DECL_ALIGN (decl), mfa));
749 }
750
751 /* Evaluate nonconstant size only once, either now or as soon as safe. */
752 if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
753 DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
754 if (DECL_SIZE_UNIT (decl) != 0
755 && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
756 DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));
757
758 /* If requested, warn about definitions of large data objects. */
759 if ((code == PARM_DECL || (code == VAR_DECL && !DECL_NONLOCAL_FRAME (decl)))
760 && !DECL_EXTERNAL (decl))
761 {
762 tree size = DECL_SIZE_UNIT (decl);
763
764 if (size != 0 && TREE_CODE (size) == INTEGER_CST)
765 {
766 /* -Wlarger-than= argument of HOST_WIDE_INT_MAX is treated
767 as if PTRDIFF_MAX had been specified, with the value
768 being that on the target rather than the host. */
769 unsigned HOST_WIDE_INT max_size = warn_larger_than_size;
770 if (max_size == HOST_WIDE_INT_MAX)
771 max_size = tree_to_shwi (TYPE_MAX_VALUE (ptrdiff_type_node));
772
773 if (compare_tree_int (size, max_size) > 0)
774 warning (OPT_Wlarger_than_, "size of %q+D %E bytes exceeds "
775 "maximum object size %wu",
776 decl, size, max_size);
777 }
778 }
779
780 /* If the RTL was already set, update its mode and mem attributes. */
781 if (rtl)
782 {
783 PUT_MODE (rtl, DECL_MODE (decl));
784 SET_DECL_RTL (decl, 0);
785 if (MEM_P (rtl))
786 set_mem_attributes (rtl, decl, 1);
787 SET_DECL_RTL (decl, rtl);
788 }
789 }
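/* Worked example of the bit-field promotion above (illustrative): for
   "struct s { unsigned f : 8; }", the field f has a DECL_SIZE of
   8 bits, so mode_for_size_tree finds QImode; provided the known
   alignment permits it, DECL_MODE becomes QImode and DECL_BIT_FIELD is
   cleared, allowing f to be accessed like an ordinary byte-sized
   field.  */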
790
791 /* Given a VAR_DECL, PARM_DECL, RESULT_DECL, or FIELD_DECL, clears the
792 results of a previous call to layout_decl and calls it again. */
793
794 void
795 relayout_decl (tree decl)
796 {
797 DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
798 SET_DECL_MODE (decl, VOIDmode);
799 if (!DECL_USER_ALIGN (decl))
800 SET_DECL_ALIGN (decl, 0);
801 if (DECL_RTL_SET_P (decl))
802 SET_DECL_RTL (decl, 0);
803
804 layout_decl (decl, 0);
805 }
806 \f
807 /* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
808 QUAL_UNION_TYPE. Return a pointer to a struct record_layout_info which
809 is to be passed to all other layout functions for this record. It is the
810 responsibility of the caller to call `free' for the storage returned.
811 Note that garbage collection is not permitted until we finish laying
812 out the record. */
813
814 record_layout_info
815 start_record_layout (tree t)
816 {
817 record_layout_info rli = XNEW (struct record_layout_info_s);
818
819 rli->t = t;
820
821 /* If the type has a minimum specified alignment (via an attribute
822 declaration, for example) use it -- otherwise, start with a
823 one-byte alignment. */
824 rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
825 rli->unpacked_align = rli->record_align;
826 rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);
827
828 #ifdef STRUCTURE_SIZE_BOUNDARY
829 /* Packed structures don't need to have minimum size. */
830 if (! TYPE_PACKED (t))
831 {
832 unsigned tmp;
833
834 /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY. */
835 tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
836 if (maximum_field_alignment != 0)
837 tmp = MIN (tmp, maximum_field_alignment);
838 rli->record_align = MAX (rli->record_align, tmp);
839 }
840 #endif
841
842 rli->offset = size_zero_node;
843 rli->bitpos = bitsize_zero_node;
844 rli->prev_field = 0;
845 rli->pending_statics = 0;
846 rli->packed_maybe_necessary = 0;
847 rli->remaining_in_alignment = 0;
848
849 return rli;
850 }
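/* Illustrative initial state: for a plain struct with no alignment
   attribute the returned rli typically starts with offset = 0,
   bitpos = 0, record_align no smaller than BITS_PER_UNIT and
   offset_align = BIGGEST_ALIGNMENT; later calls to place_field
   advance offset/bitpos and may raise record_align.  */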
851
852 /* Fold sizetype value X to bitsizetype, given that X represents a type
853 size or offset. */
854
855 static tree
856 bits_from_bytes (tree x)
857 {
858 if (POLY_INT_CST_P (x))
859 /* The runtime calculation isn't allowed to overflow sizetype;
860 increasing the runtime values must always increase the size
861 or offset of the object. This means that the object imposes
862 a maximum value on the runtime parameters, but we don't record
863 what that is. */
864 return build_poly_int_cst
865 (bitsizetype,
866 poly_wide_int::from (poly_int_cst_value (x),
867 TYPE_PRECISION (bitsizetype),
868 TYPE_SIGN (TREE_TYPE (x))));
869 x = fold_convert (bitsizetype, x);
870 gcc_checking_assert (x);
871 return x;
872 }
873
874 /* Return the combined bit position for the byte offset OFFSET and the
875 bit position BITPOS.
876
877 These functions operate on byte and bit positions present in FIELD_DECLs
878 and assume that these expressions result in no (intermediate) overflow.
879 This assumption is necessary to fold the expressions as much as possible,
880 so as to avoid creating artificially variable-sized types in languages
881 supporting variable-sized types like Ada. */
882
883 tree
884 bit_from_pos (tree offset, tree bitpos)
885 {
886 return size_binop (PLUS_EXPR, bitpos,
887 size_binop (MULT_EXPR, bits_from_bytes (offset),
888 bitsize_unit_node));
889 }
890
891 /* Return the combined truncated byte position for the byte offset OFFSET and
892 the bit position BITPOS. */
893
894 tree
895 byte_from_pos (tree offset, tree bitpos)
896 {
897 tree bytepos;
898 if (TREE_CODE (bitpos) == MULT_EXPR
899 && tree_int_cst_equal (TREE_OPERAND (bitpos, 1), bitsize_unit_node))
900 bytepos = TREE_OPERAND (bitpos, 0);
901 else
902 bytepos = size_binop (TRUNC_DIV_EXPR, bitpos, bitsize_unit_node);
903 return size_binop (PLUS_EXPR, offset, fold_convert (sizetype, bytepos));
904 }
905
906 /* Split the bit position POS into a byte offset *POFFSET and a bit
907 position *PBITPOS with the byte offset aligned to OFF_ALIGN bits. */
908
909 void
910 pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
911 tree pos)
912 {
913 tree toff_align = bitsize_int (off_align);
914 if (TREE_CODE (pos) == MULT_EXPR
915 && tree_int_cst_equal (TREE_OPERAND (pos, 1), toff_align))
916 {
917 *poffset = size_binop (MULT_EXPR,
918 fold_convert (sizetype, TREE_OPERAND (pos, 0)),
919 size_int (off_align / BITS_PER_UNIT));
920 *pbitpos = bitsize_zero_node;
921 }
922 else
923 {
924 *poffset = size_binop (MULT_EXPR,
925 fold_convert (sizetype,
926 size_binop (FLOOR_DIV_EXPR, pos,
927 toff_align)),
928 size_int (off_align / BITS_PER_UNIT));
929 *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, toff_align);
930 }
931 }
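/* Worked example for the bit/byte position helpers above (assuming
   8-bit units): with offset = 3 bytes and bitpos = 5 bits,
   bit_from_pos returns 3 * 8 + 5 = 29 bits and byte_from_pos returns
   3 + 5/8 = 3 bytes.  Splitting pos = 29 with pos_from_bit and
   off_align = 16 gives *poffset = (29 / 16) * (16 / 8) = 2 bytes and
   *pbitpos = 29 % 16 = 13 bits.  */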
932
933 /* Given a pointer to bit and byte offsets and an offset alignment,
934 normalize the offsets so they are within the alignment. */
935
936 void
937 normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
938 {
939 /* If the bit position is now larger than it should be, adjust it
940 downwards. */
941 if (compare_tree_int (*pbitpos, off_align) >= 0)
942 {
943 tree offset, bitpos;
944 pos_from_bit (&offset, &bitpos, off_align, *pbitpos);
945 *poffset = size_binop (PLUS_EXPR, *poffset, offset);
946 *pbitpos = bitpos;
947 }
948 }
949
950 /* Print debugging information about the information in RLI. */
951
952 DEBUG_FUNCTION void
953 debug_rli (record_layout_info rli)
954 {
955 print_node_brief (stderr, "type", rli->t, 0);
956 print_node_brief (stderr, "\noffset", rli->offset, 0);
957 print_node_brief (stderr, " bitpos", rli->bitpos, 0);
958
959 fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
960 rli->record_align, rli->unpacked_align,
961 rli->offset_align);
962
963   /* The ms_struct code is the only one that uses this.  */
964 if (targetm.ms_bitfield_layout_p (rli->t))
965 fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment);
966
967 if (rli->packed_maybe_necessary)
968 fprintf (stderr, "packed may be necessary\n");
969
970 if (!vec_safe_is_empty (rli->pending_statics))
971 {
972 fprintf (stderr, "pending statics:\n");
973 debug (rli->pending_statics);
974 }
975 }
976
977 /* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
978 BITPOS if necessary to keep BITPOS below OFFSET_ALIGN. */
979
980 void
981 normalize_rli (record_layout_info rli)
982 {
983 normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
984 }
985
986 /* Returns the size in bytes allocated so far. */
987
988 tree
989 rli_size_unit_so_far (record_layout_info rli)
990 {
991 return byte_from_pos (rli->offset, rli->bitpos);
992 }
993
994 /* Returns the size in bits allocated so far. */
995
996 tree
997 rli_size_so_far (record_layout_info rli)
998 {
999 return bit_from_pos (rli->offset, rli->bitpos);
1000 }
1001
1002 /* FIELD is about to be added to RLI->T. The alignment (in bits) of
1003 the next available location within the record is given by KNOWN_ALIGN.
1004 Update the variable alignment fields in RLI, and return the alignment
1005 to give the FIELD. */
1006
1007 unsigned int
1008 update_alignment_for_field (record_layout_info rli, tree field,
1009 unsigned int known_align)
1010 {
1011 /* The alignment required for FIELD. */
1012 unsigned int desired_align;
1013 /* The type of this field. */
1014 tree type = TREE_TYPE (field);
1015 /* True if the field was explicitly aligned by the user. */
1016 bool user_align;
1017 bool is_bitfield;
1018
1019   /* Do not attempt to align an ERROR_MARK node.  */
1020 if (TREE_CODE (type) == ERROR_MARK)
1021 return 0;
1022
1023 /* Lay out the field so we know what alignment it needs. */
1024 layout_decl (field, known_align);
1025 desired_align = DECL_ALIGN (field);
1026 user_align = DECL_USER_ALIGN (field);
1027
1028 is_bitfield = (type != error_mark_node
1029 && DECL_BIT_FIELD_TYPE (field)
1030 && ! integer_zerop (TYPE_SIZE (type)));
1031
1032 /* Record must have at least as much alignment as any field.
1033 Otherwise, the alignment of the field within the record is
1034 meaningless. */
1035 if (targetm.ms_bitfield_layout_p (rli->t))
1036 {
1037 /* Here, the alignment of the underlying type of a bitfield can
1038 affect the alignment of a record; even a zero-sized field
1039 can do this. The alignment should be to the alignment of
1040 the type, except that for zero-size bitfields this only
1041 applies if there was an immediately prior, nonzero-size
1042 bitfield. (That's the way it is, experimentally.) */
1043 if (!is_bitfield
1044 || ((DECL_SIZE (field) == NULL_TREE
1045 || !integer_zerop (DECL_SIZE (field)))
1046 ? !DECL_PACKED (field)
1047 : (rli->prev_field
1048 && DECL_BIT_FIELD_TYPE (rli->prev_field)
1049 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
1050 {
1051 unsigned int type_align = TYPE_ALIGN (type);
1052 if (!is_bitfield && DECL_PACKED (field))
1053 type_align = desired_align;
1054 else
1055 type_align = MAX (type_align, desired_align);
1056 if (maximum_field_alignment != 0)
1057 type_align = MIN (type_align, maximum_field_alignment);
1058 rli->record_align = MAX (rli->record_align, type_align);
1059 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
1060 }
1061 }
1062 else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
1063 {
1064 /* Named bit-fields cause the entire structure to have the
1065 alignment implied by their type. Some targets also apply the same
1066 rules to unnamed bitfields. */
1067 if (DECL_NAME (field) != 0
1068 || targetm.align_anon_bitfield ())
1069 {
1070 unsigned int type_align = TYPE_ALIGN (type);
1071
1072 #ifdef ADJUST_FIELD_ALIGN
1073 if (! TYPE_USER_ALIGN (type))
1074 type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
1075 #endif
1076
1077 	  /* Targets might choose to handle unnamed and hence possibly
1078 	     zero-width bitfields.  Those are not influenced by #pragmas
1079 or packed attributes. */
1080 if (integer_zerop (DECL_SIZE (field)))
1081 {
1082 if (initial_max_fld_align)
1083 type_align = MIN (type_align,
1084 initial_max_fld_align * BITS_PER_UNIT);
1085 }
1086 else if (maximum_field_alignment != 0)
1087 type_align = MIN (type_align, maximum_field_alignment);
1088 else if (DECL_PACKED (field))
1089 type_align = MIN (type_align, BITS_PER_UNIT);
1090
1091 /* The alignment of the record is increased to the maximum
1092 of the current alignment, the alignment indicated on the
1093 field (i.e., the alignment specified by an __aligned__
1094 attribute), and the alignment indicated by the type of
1095 the field. */
1096 rli->record_align = MAX (rli->record_align, desired_align);
1097 rli->record_align = MAX (rli->record_align, type_align);
1098
1099 if (warn_packed)
1100 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
1101 user_align |= TYPE_USER_ALIGN (type);
1102 }
1103 }
1104 else
1105 {
1106 rli->record_align = MAX (rli->record_align, desired_align);
1107 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
1108 }
1109
1110 TYPE_USER_ALIGN (rli->t) |= user_align;
1111
1112 return desired_align;
1113 }
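/* Illustrative example: when laying out "struct s { char c; int i; }"
   on a typical target with 32-bit, 32-bit-aligned int, placing c
   leaves record_align at 8 bits, while placing i raises both
   record_align and unpacked_align to 32 bits, so the whole struct ends
   up 4-byte aligned (and c is followed by 3 bytes of padding).  */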
1114
1115 /* Issue a warning if the record alignment, RECORD_ALIGN, is less than
1116    the field alignment of FIELD, or if FIELD isn't aligned.  */
1117
1118 static void
1119 handle_warn_if_not_align (tree field, unsigned int record_align)
1120 {
1121 tree type = TREE_TYPE (field);
1122
1123 if (type == error_mark_node)
1124 return;
1125
1126 unsigned int warn_if_not_align = 0;
1127
1128 int opt_w = 0;
1129
1130 if (warn_if_not_aligned)
1131 {
1132 warn_if_not_align = DECL_WARN_IF_NOT_ALIGN (field);
1133 if (!warn_if_not_align)
1134 warn_if_not_align = TYPE_WARN_IF_NOT_ALIGN (type);
1135 if (warn_if_not_align)
1136 opt_w = OPT_Wif_not_aligned;
1137 }
1138
1139 if (!warn_if_not_align
1140 && warn_packed_not_aligned
1141 && lookup_attribute ("aligned", TYPE_ATTRIBUTES (type)))
1142 {
1143 warn_if_not_align = TYPE_ALIGN (type);
1144 opt_w = OPT_Wpacked_not_aligned;
1145 }
1146
1147 if (!warn_if_not_align)
1148 return;
1149
1150 tree context = DECL_CONTEXT (field);
1151
1152 warn_if_not_align /= BITS_PER_UNIT;
1153 record_align /= BITS_PER_UNIT;
1154 if ((record_align % warn_if_not_align) != 0)
1155 warning (opt_w, "alignment %u of %qT is less than %u",
1156 record_align, context, warn_if_not_align);
1157
1158 tree off = byte_position (field);
1159 if (!multiple_of_p (TREE_TYPE (off), off, size_int (warn_if_not_align)))
1160 {
1161 if (TREE_CODE (off) == INTEGER_CST)
1162 warning (opt_w, "%q+D offset %E in %qT isn%'t aligned to %u",
1163 field, off, context, warn_if_not_align);
1164 else
1165 warning (opt_w, "%q+D offset %E in %qT may not be aligned to %u",
1166 field, off, context, warn_if_not_align);
1167 }
1168 }
1169
1170 /* Called from place_field to handle unions. */
1171
1172 static void
1173 place_union_field (record_layout_info rli, tree field)
1174 {
1175 update_alignment_for_field (rli, field, /*known_align=*/0);
1176
1177 DECL_FIELD_OFFSET (field) = size_zero_node;
1178 DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
1179 SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);
1180 handle_warn_if_not_align (field, rli->record_align);
1181
1182 /* If this is an ERROR_MARK return *after* having set the
1183 field at the start of the union. This helps when parsing
1184 invalid fields. */
1185 if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
1186 return;
1187
1188 if (AGGREGATE_TYPE_P (TREE_TYPE (field))
1189 && TYPE_TYPELESS_STORAGE (TREE_TYPE (field)))
1190 TYPE_TYPELESS_STORAGE (rli->t) = 1;
1191
1192 /* We assume the union's size will be a multiple of a byte so we don't
1193 bother with BITPOS. */
1194 if (TREE_CODE (rli->t) == UNION_TYPE)
1195 rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
1196 else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
1197 rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field),
1198 DECL_SIZE_UNIT (field), rli->offset);
1199 }
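/* Illustrative example: for "union u { char c; double d; }" each member
   gets DECL_FIELD_OFFSET 0; after both members are placed, rli->offset
   is MAX (1, 8) = 8 bytes on a target with 8-byte doubles, and the
   union's alignment has been raised to that of double by
   update_alignment_for_field.  */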
1200
1201 /* A bitfield of SIZE with a required access alignment of ALIGN is allocated
1202 at BYTE_OFFSET / BIT_OFFSET. Return nonzero if the field would span more
1203 units of alignment than the underlying TYPE. */
1204 static int
1205 excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
1206 HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
1207 {
1208 /* Note that the calculation of OFFSET might overflow; we calculate it so
1209 that we still get the right result as long as ALIGN is a power of two. */
1210 unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;
1211
1212 offset = offset % align;
1213 return ((offset + size + align - 1) / align
1214 > tree_to_uhwi (TYPE_SIZE (type)) / align);
1215 }
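/* Worked example (illustrative): a 6-bit field of 32-bit "int" type
   (alignment 32) proposed at byte_offset = 3, bit_offset = 6 sits at
   bit 30 within its 32-bit unit; (30 + 6 + 31) / 32 = 2 alignment
   units, which exceeds 32 / 32 = 1, so excess_unit_span returns
   nonzero and place_field bumps the field to the next 32-bit
   boundary.  */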
1216
1217 /* RLI contains information about the layout of a RECORD_TYPE. FIELD
1218 is a FIELD_DECL to be added after those fields already present in
1219 T. (FIELD is not actually added to the TYPE_FIELDS list here;
1220 callers that desire that behavior must manually perform that step.) */
1221
1222 void
1223 place_field (record_layout_info rli, tree field)
1224 {
1225 /* The alignment required for FIELD. */
1226 unsigned int desired_align;
1227 /* The alignment FIELD would have if we just dropped it into the
1228 record as it presently stands. */
1229 unsigned int known_align;
1230 unsigned int actual_align;
1231 /* The type of this field. */
1232 tree type = TREE_TYPE (field);
1233
1234 gcc_assert (TREE_CODE (field) != ERROR_MARK);
1235
1236 /* If FIELD is static, then treat it like a separate variable, not
1237 really like a structure field. If it is a FUNCTION_DECL, it's a
1238 method. In both cases, all we do is lay out the decl, and we do
1239 it *after* the record is laid out. */
1240 if (VAR_P (field))
1241 {
1242 vec_safe_push (rli->pending_statics, field);
1243 return;
1244 }
1245
1246 /* Enumerators and enum types which are local to this class need not
1247 be laid out. Likewise for initialized constant fields. */
1248 else if (TREE_CODE (field) != FIELD_DECL)
1249 return;
1250
1251 /* Unions are laid out very differently than records, so split
1252 that code off to another function. */
1253 else if (TREE_CODE (rli->t) != RECORD_TYPE)
1254 {
1255 place_union_field (rli, field);
1256 return;
1257 }
1258
1259 else if (TREE_CODE (type) == ERROR_MARK)
1260 {
1261 /* Place this field at the current allocation position, so we
1262 maintain monotonicity. */
1263 DECL_FIELD_OFFSET (field) = rli->offset;
1264 DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
1265 SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
1266 handle_warn_if_not_align (field, rli->record_align);
1267 return;
1268 }
1269
1270 if (AGGREGATE_TYPE_P (type)
1271 && TYPE_TYPELESS_STORAGE (type))
1272 TYPE_TYPELESS_STORAGE (rli->t) = 1;
1273
1274 /* Work out the known alignment so far. Note that A & (-A) is the
1275 value of the least-significant bit in A that is one. */
1276 if (! integer_zerop (rli->bitpos))
1277 known_align = least_bit_hwi (tree_to_uhwi (rli->bitpos));
1278 else if (integer_zerop (rli->offset))
1279 known_align = 0;
1280 else if (tree_fits_uhwi_p (rli->offset))
1281 known_align = (BITS_PER_UNIT
1282 * least_bit_hwi (tree_to_uhwi (rli->offset)));
1283 else
1284 known_align = rli->offset_align;
1285
1286 desired_align = update_alignment_for_field (rli, field, known_align);
1287 if (known_align == 0)
1288 known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
1289
1290 if (warn_packed && DECL_PACKED (field))
1291 {
1292 if (known_align >= TYPE_ALIGN (type))
1293 {
1294 if (TYPE_ALIGN (type) > desired_align)
1295 {
1296 if (STRICT_ALIGNMENT)
1297 warning (OPT_Wattributes, "packed attribute causes "
1298 "inefficient alignment for %q+D", field);
1299 /* Don't warn if DECL_PACKED was set by the type. */
1300 else if (!TYPE_PACKED (rli->t))
1301 warning (OPT_Wattributes, "packed attribute is "
1302 "unnecessary for %q+D", field);
1303 }
1304 }
1305 else
1306 rli->packed_maybe_necessary = 1;
1307 }
1308
1309 /* Does this field automatically have alignment it needs by virtue
1310 of the fields that precede it and the record's own alignment? */
1311 if (known_align < desired_align
1312 && (! targetm.ms_bitfield_layout_p (rli->t)
1313 || rli->prev_field == NULL))
1314 {
1315 /* No, we need to skip space before this field.
1316 Bump the cumulative size to multiple of field alignment. */
1317
1318 if (!targetm.ms_bitfield_layout_p (rli->t)
1319 && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION)
1320 warning (OPT_Wpadded, "padding struct to align %q+D", field);
1321
1322 /* If the alignment is still within offset_align, just align
1323 the bit position. */
1324 if (desired_align < rli->offset_align)
1325 rli->bitpos = round_up (rli->bitpos, desired_align);
1326 else
1327 {
1328 /* First adjust OFFSET by the partial bits, then align. */
1329 rli->offset
1330 = size_binop (PLUS_EXPR, rli->offset,
1331 fold_convert (sizetype,
1332 size_binop (CEIL_DIV_EXPR, rli->bitpos,
1333 bitsize_unit_node)));
1334 rli->bitpos = bitsize_zero_node;
1335
1336 rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
1337 }
1338
1339 if (! TREE_CONSTANT (rli->offset))
1340 rli->offset_align = desired_align;
1341 }
1342
1343 /* Handle compatibility with PCC. Note that if the record has any
1344 variable-sized fields, we need not worry about compatibility. */
1345 if (PCC_BITFIELD_TYPE_MATTERS
1346 && ! targetm.ms_bitfield_layout_p (rli->t)
1347 && TREE_CODE (field) == FIELD_DECL
1348 && type != error_mark_node
1349 && DECL_BIT_FIELD (field)
1350 && (! DECL_PACKED (field)
1351 /* Enter for these packed fields only to issue a warning. */
1352 || TYPE_ALIGN (type) <= BITS_PER_UNIT)
1353 && maximum_field_alignment == 0
1354 && ! integer_zerop (DECL_SIZE (field))
1355 && tree_fits_uhwi_p (DECL_SIZE (field))
1356 && tree_fits_uhwi_p (rli->offset)
1357 && tree_fits_uhwi_p (TYPE_SIZE (type)))
1358 {
1359 unsigned int type_align = TYPE_ALIGN (type);
1360 tree dsize = DECL_SIZE (field);
1361 HOST_WIDE_INT field_size = tree_to_uhwi (dsize);
1362 HOST_WIDE_INT offset = tree_to_uhwi (rli->offset);
1363 HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos);
1364
1365 #ifdef ADJUST_FIELD_ALIGN
1366 if (! TYPE_USER_ALIGN (type))
1367 type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
1368 #endif
1369
1370 /* A bit field may not span more units of alignment of its type
1371 than its type itself. Advance to next boundary if necessary. */
1372 if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
1373 {
1374 if (DECL_PACKED (field))
1375 {
1376 if (warn_packed_bitfield_compat == 1)
1377 inform
1378 (input_location,
1379 "offset of packed bit-field %qD has changed in GCC 4.4",
1380 field);
1381 }
1382 else
1383 rli->bitpos = round_up (rli->bitpos, type_align);
1384 }
1385
1386 if (! DECL_PACKED (field))
1387 TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
1388
1389 SET_TYPE_WARN_IF_NOT_ALIGN (rli->t,
1390 TYPE_WARN_IF_NOT_ALIGN (type));
1391 }
1392
1393 #ifdef BITFIELD_NBYTES_LIMITED
1394 if (BITFIELD_NBYTES_LIMITED
1395 && ! targetm.ms_bitfield_layout_p (rli->t)
1396 && TREE_CODE (field) == FIELD_DECL
1397 && type != error_mark_node
1398 && DECL_BIT_FIELD_TYPE (field)
1399 && ! DECL_PACKED (field)
1400 && ! integer_zerop (DECL_SIZE (field))
1401 && tree_fits_uhwi_p (DECL_SIZE (field))
1402 && tree_fits_uhwi_p (rli->offset)
1403 && tree_fits_uhwi_p (TYPE_SIZE (type)))
1404 {
1405 unsigned int type_align = TYPE_ALIGN (type);
1406 tree dsize = DECL_SIZE (field);
1407 HOST_WIDE_INT field_size = tree_to_uhwi (dsize);
1408 HOST_WIDE_INT offset = tree_to_uhwi (rli->offset);
1409 HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos);
1410
1411 #ifdef ADJUST_FIELD_ALIGN
1412 if (! TYPE_USER_ALIGN (type))
1413 type_align = ADJUST_FIELD_ALIGN (field, type, type_align);
1414 #endif
1415
1416 if (maximum_field_alignment != 0)
1417 type_align = MIN (type_align, maximum_field_alignment);
1418 /* ??? This test is opposite the test in the containing if
1419 statement, so this code is unreachable currently. */
1420 else if (DECL_PACKED (field))
1421 type_align = MIN (type_align, BITS_PER_UNIT);
1422
1423 /* A bit field may not span the unit of alignment of its type.
1424 Advance to next boundary if necessary. */
1425 if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
1426 rli->bitpos = round_up (rli->bitpos, type_align);
1427
1428 TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
1429 SET_TYPE_WARN_IF_NOT_ALIGN (rli->t,
1430 TYPE_WARN_IF_NOT_ALIGN (type));
1431 }
1432 #endif
1433
1434 /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
1435 A subtlety:
1436 When a bit field is inserted into a packed record, the whole
1437 size of the underlying type is used by one or more same-size
1438      adjacent bitfields.  (That is, if it's long:3, 32 bits is
1439 used in the record, and any additional adjacent long bitfields are
1440 packed into the same chunk of 32 bits. However, if the size
1441 changes, a new field of that size is allocated.) In an unpacked
1442 record, this is the same as using alignment, but not equivalent
1443 when packing.
1444
1445      Note: for compatibility, we use the type size, not the type alignment,
1446      to determine alignment, since that matches the documentation.  */
1447
1448 if (targetm.ms_bitfield_layout_p (rli->t))
1449 {
1450 tree prev_saved = rli->prev_field;
1451 tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;
1452
1453 /* This is a bitfield if it exists. */
1454 if (rli->prev_field)
1455 {
1456 bool realign_p = known_align < desired_align;
1457
1458 /* If both are bitfields, nonzero, and the same size, this is
1459 the middle of a run. Zero declared size fields are special
1460 and handled as "end of run". (Note: it's nonzero declared
1461 size, but equal type sizes!) (Since we know that both
1462 the current and previous fields are bitfields by the
1463 time we check it, DECL_SIZE must be present for both.) */
1464 if (DECL_BIT_FIELD_TYPE (field)
1465 && !integer_zerop (DECL_SIZE (field))
1466 && !integer_zerop (DECL_SIZE (rli->prev_field))
1467 && tree_fits_shwi_p (DECL_SIZE (rli->prev_field))
1468 && tree_fits_uhwi_p (TYPE_SIZE (type))
1469 && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
1470 {
1471 /* We're in the middle of a run of equal type size fields; make
1472 sure we realign if we run out of bits. (Not decl size,
1473 type size!) */
1474 HOST_WIDE_INT bitsize = tree_to_uhwi (DECL_SIZE (field));
1475
1476 if (rli->remaining_in_alignment < bitsize)
1477 {
1478 HOST_WIDE_INT typesize = tree_to_uhwi (TYPE_SIZE (type));
1479
1480 /* out of bits; bump up to next 'word'. */
1481 rli->bitpos
1482 = size_binop (PLUS_EXPR, rli->bitpos,
1483 bitsize_int (rli->remaining_in_alignment));
1484 rli->prev_field = field;
1485 if (typesize < bitsize)
1486 rli->remaining_in_alignment = 0;
1487 else
1488 rli->remaining_in_alignment = typesize - bitsize;
1489 }
1490 else
1491 {
1492 rli->remaining_in_alignment -= bitsize;
1493 realign_p = false;
1494 }
1495 }
1496 else
1497 {
1498 /* End of a run: if leaving a run of bitfields of the same type
1499 size, we have to "use up" the rest of the bits of the type
1500 size.
1501
1502 Compute the new position as the sum of the size for the prior
1503 type and where we first started working on that type.
1504 Note: since the beginning of the field was aligned then
1505 	     of course the end will be too.  No rounding needed.  */
1506
1507 if (!integer_zerop (DECL_SIZE (rli->prev_field)))
1508 {
1509 rli->bitpos
1510 = size_binop (PLUS_EXPR, rli->bitpos,
1511 bitsize_int (rli->remaining_in_alignment));
1512 }
1513 else
1514 /* We "use up" size zero fields; the code below should behave
1515 as if the prior field was not a bitfield. */
1516 prev_saved = NULL;
1517
1518 /* Cause a new bitfield to be captured, either this time (if
1519 currently a bitfield) or next time we see one. */
1520 if (!DECL_BIT_FIELD_TYPE (field)
1521 || integer_zerop (DECL_SIZE (field)))
1522 rli->prev_field = NULL;
1523 }
1524
1525 /* Does this field automatically have alignment it needs by virtue
1526 of the fields that precede it and the record's own alignment? */
1527 if (realign_p)
1528 {
1529 /* If the alignment is still within offset_align, just align
1530 the bit position. */
1531 if (desired_align < rli->offset_align)
1532 rli->bitpos = round_up (rli->bitpos, desired_align);
1533 else
1534 {
1535 /* First adjust OFFSET by the partial bits, then align. */
1536 tree d = size_binop (CEIL_DIV_EXPR, rli->bitpos,
1537 bitsize_unit_node);
1538 rli->offset = size_binop (PLUS_EXPR, rli->offset,
1539 fold_convert (sizetype, d));
1540 rli->bitpos = bitsize_zero_node;
1541
1542 rli->offset = round_up (rli->offset,
1543 desired_align / BITS_PER_UNIT);
1544 }
1545
1546 if (! TREE_CONSTANT (rli->offset))
1547 rli->offset_align = desired_align;
1548 }
1549
1550 normalize_rli (rli);
1551 }
1552
1553 /* If we're starting a new run of same type size bitfields
1554 (or a run of non-bitfields), set up the "first of the run"
1555 fields.
1556
1557 That is, if the current field is not a bitfield, or if there
1558 	 was a prior bitfield and the type sizes differ, or if there wasn't
1559 	 a prior bitfield and the size of the current field is nonzero.
1560
1561 Note: we must be sure to test ONLY the type size if there was
1562 a prior bitfield and ONLY for the current field being zero if
1563 there wasn't. */
1564
1565 if (!DECL_BIT_FIELD_TYPE (field)
1566 || (prev_saved != NULL
1567 ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
1568 : !integer_zerop (DECL_SIZE (field))))
1569 {
1570 /* Never smaller than a byte for compatibility. */
1571 unsigned int type_align = BITS_PER_UNIT;
1572
1573 /* (When not a bitfield), we could be seeing a flex array (with
1574 no DECL_SIZE). Since we won't be using remaining_in_alignment
1575 until we see a bitfield (and come by here again) we just skip
1576 calculating it. */
1577 if (DECL_SIZE (field) != NULL
1578 && tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (field)))
1579 && tree_fits_uhwi_p (DECL_SIZE (field)))
1580 {
1581 unsigned HOST_WIDE_INT bitsize
1582 = tree_to_uhwi (DECL_SIZE (field));
1583 unsigned HOST_WIDE_INT typesize
1584 = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field)));
1585
1586 if (typesize < bitsize)
1587 rli->remaining_in_alignment = 0;
1588 else
1589 rli->remaining_in_alignment = typesize - bitsize;
1590 }
1591
1592 /* Now align (conventionally) for the new type. */
1593 if (! DECL_PACKED (field))
1594 type_align = TYPE_ALIGN (TREE_TYPE (field));
1595
1596 if (maximum_field_alignment != 0)
1597 type_align = MIN (type_align, maximum_field_alignment);
1598
1599 rli->bitpos = round_up (rli->bitpos, type_align);
1600
1601 /* If we really aligned, don't allow subsequent bitfields
1602 to undo that. */
1603 rli->prev_field = NULL;
1604 }
1605 }
1606
1607 /* Offset so far becomes the position of this field after normalizing. */
1608 normalize_rli (rli);
1609 DECL_FIELD_OFFSET (field) = rli->offset;
1610 DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
1611 SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
1612 handle_warn_if_not_align (field, rli->record_align);
1613
1614 /* Evaluate nonconstant offsets only once, either now or as soon as safe. */
1615 if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST)
1616 DECL_FIELD_OFFSET (field) = variable_size (DECL_FIELD_OFFSET (field));
1617
1618 /* If this field ended up more aligned than we thought it would be (we
1619 approximate this by seeing if its position changed), lay out the field
1620 again; perhaps we can use an integral mode for it now. */
1621 if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
1622 actual_align = least_bit_hwi (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)));
1623 else if (integer_zerop (DECL_FIELD_OFFSET (field)))
1624 actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
1625 else if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
1626 actual_align = (BITS_PER_UNIT
1627 * least_bit_hwi (tree_to_uhwi (DECL_FIELD_OFFSET (field))));
1628 else
1629 actual_align = DECL_OFFSET_ALIGN (field);
1630   /* ACTUAL_ALIGN is still the actual alignment *within the record*.
1631 store / extract bit field operations will check the alignment of the
1632 record against the mode of bit fields. */
1633
1634 if (known_align != actual_align)
1635 layout_decl (field, actual_align);
1636
1637 if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
1638 rli->prev_field = field;
1639
1640 /* Now add size of this field to the size of the record. If the size is
1641 not constant, treat the field as being a multiple of bytes and just
1642 adjust the offset, resetting the bit position. Otherwise, apportion the
1643 size amongst the bit position and offset. First handle the case of an
1644 unspecified size, which can happen when we have an invalid nested struct
1645 definition, such as struct j { struct j { int i; } }. The error message
1646 is printed in finish_struct. */
1647 if (DECL_SIZE (field) == 0)
1648 /* Do nothing. */;
1649 else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
1650 || TREE_OVERFLOW (DECL_SIZE (field)))
1651 {
1652 rli->offset
1653 = size_binop (PLUS_EXPR, rli->offset,
1654 fold_convert (sizetype,
1655 size_binop (CEIL_DIV_EXPR, rli->bitpos,
1656 bitsize_unit_node)));
1657 rli->offset
1658 = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
1659 rli->bitpos = bitsize_zero_node;
1660 rli->offset_align = MIN (rli->offset_align, desired_align);
1661
1662 if (!multiple_of_p (bitsizetype, DECL_SIZE (field),
1663 bitsize_int (rli->offset_align)))
1664 {
1665 tree type = strip_array_types (TREE_TYPE (field));
1666 /* The above adjusts offset_align just based on the start of the
1667 field. The field might not have a size that is a multiple of
1668 that offset_align though. If the field is an array of fixed
1669 sized elements, assume there can be any multiple of those
1670 sizes. If it is a variable length aggregate or array of
1671 variable length aggregates, assume worst that the end is
1672 just BITS_PER_UNIT aligned. */
1673 if (TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
1674 {
1675 if (TREE_INT_CST_LOW (TYPE_SIZE (type)))
1676 {
1677 unsigned HOST_WIDE_INT sz
1678 = least_bit_hwi (TREE_INT_CST_LOW (TYPE_SIZE (type)));
1679 rli->offset_align = MIN (rli->offset_align, sz);
1680 }
1681 }
1682 else
1683 rli->offset_align = MIN (rli->offset_align, BITS_PER_UNIT);
1684 }
1685 }
1686 else if (targetm.ms_bitfield_layout_p (rli->t))
1687 {
1688 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
1689
1690 /* If FIELD is the last field and doesn't end at the full length
1691 of the type then pad the struct out to the full length of the
1692 last type. */
1693 if (DECL_BIT_FIELD_TYPE (field)
1694 && !integer_zerop (DECL_SIZE (field)))
1695 {
1696 /* We have to scan, because non-field DECLS are also here. */
1697 tree probe = field;
1698 while ((probe = DECL_CHAIN (probe)))
1699 if (TREE_CODE (probe) == FIELD_DECL)
1700 break;
1701 if (!probe)
1702 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
1703 bitsize_int (rli->remaining_in_alignment));
1704 }
1705
1706 normalize_rli (rli);
1707 }
1708 else
1709 {
1710 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
1711 normalize_rli (rli);
1712 }
1713 }
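
/* A minimal illustration of the placement logic above, assuming a typical
   target where BITS_PER_UNIT is 8, short is 16 bits wide and int is 32
   bits wide.  The type below is hypothetical and only serves as an
   example; it is not used anywhere.  */

struct stor_layout_place_field_example
{
  char c;	/* placed at bit position 0 */
  short s;	/* rounded up to bit 16 by its 16-bit type alignment */
  int i;	/* rounded up to bit 32 by its 32-bit type alignment */
};
/* The unpadded size is 64 bits, which finalize_record_size below keeps as
   TYPE_SIZE because it is already a multiple of the 32-bit record
   alignment.  */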
1714
1715 /* Assuming that all the fields have been laid out, this function uses
1716 RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
1717 indicated by RLI. */
1718
1719 static void
1720 finalize_record_size (record_layout_info rli)
1721 {
1722 tree unpadded_size, unpadded_size_unit;
1723
1724 /* Now we want just byte and bit offsets, so set the offset alignment
1725 to be a byte and then normalize. */
1726 rli->offset_align = BITS_PER_UNIT;
1727 normalize_rli (rli);
1728
1729 /* Determine the desired alignment. */
1730 #ifdef ROUND_TYPE_ALIGN
1731 SET_TYPE_ALIGN (rli->t, ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
1732 rli->record_align));
1733 #else
1734 SET_TYPE_ALIGN (rli->t, MAX (TYPE_ALIGN (rli->t), rli->record_align));
1735 #endif
1736
1737 /* Compute the size so far. Be sure to allow for extra bits in the
1738 size in bytes. We have guaranteed above that it will be no more
1739 than a single byte. */
1740 unpadded_size = rli_size_so_far (rli);
1741 unpadded_size_unit = rli_size_unit_so_far (rli);
1742 if (! integer_zerop (rli->bitpos))
1743 unpadded_size_unit
1744 = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);
1745
1746 /* Round the size up to be a multiple of the required alignment. */
1747 TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
1748 TYPE_SIZE_UNIT (rli->t)
1749 = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));
1750
1751 if (TREE_CONSTANT (unpadded_size)
1752 && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
1753 && input_location != BUILTINS_LOCATION)
1754 warning (OPT_Wpadded, "padding struct size to alignment boundary");
1755
1756 if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
1757 && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
1758 && TREE_CONSTANT (unpadded_size))
1759 {
1760 tree unpacked_size;
1761
1762 #ifdef ROUND_TYPE_ALIGN
1763 rli->unpacked_align
1764 = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
1765 #else
1766 rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
1767 #endif
1768
1769 unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
1770 if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
1771 {
1772 if (TYPE_NAME (rli->t))
1773 {
1774 tree name;
1775
1776 if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
1777 name = TYPE_NAME (rli->t);
1778 else
1779 name = DECL_NAME (TYPE_NAME (rli->t));
1780
1781 if (STRICT_ALIGNMENT)
1782 warning (OPT_Wpacked, "packed attribute causes inefficient "
1783 "alignment for %qE", name);
1784 else
1785 warning (OPT_Wpacked,
1786 "packed attribute is unnecessary for %qE", name);
1787 }
1788 else
1789 {
1790 if (STRICT_ALIGNMENT)
1791 warning (OPT_Wpacked,
1792 "packed attribute causes inefficient alignment");
1793 else
1794 warning (OPT_Wpacked, "packed attribute is unnecessary");
1795 }
1796 }
1797 }
1798 }
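
/* A hypothetical illustration of the -Wpadded warning above, assuming a
   typical target where char is 8 bits wide and int is 32 bits wide.  The
   type below only serves as an example and is not used anywhere.  */

struct stor_layout_padded_example
{
  int i;	/* bits 0-31 */
  char c;	/* bits 32-39 */
};
/* The unpadded size is 40 bits, but TYPE_ALIGN is 32 bits, so TYPE_SIZE
   is rounded up to 64 bits and -Wpadded would typically report "padding
   struct size to alignment boundary".  Conversely, for something like
   struct { char a; char b; } __attribute__((packed)) the packed and
   unpacked layouts coincide, so -Wpacked would typically report that the
   packed attribute is unnecessary.  */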
1799
1800 /* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE). */
1801
1802 void
1803 compute_record_mode (tree type)
1804 {
1805 tree field;
1806 machine_mode mode = VOIDmode;
1807
1808 /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
1809 However, if possible, we use a mode that fits in a register
1810 instead, in order to allow for better optimization down the
1811 line. */
1812 SET_TYPE_MODE (type, BLKmode);
1813
1814 if (! tree_fits_uhwi_p (TYPE_SIZE (type)))
1815 return;
1816
1817 /* A record which has any BLKmode members must itself be
1818 BLKmode; it can't go in a register. Unless the member is
1819 BLKmode only because it isn't aligned. */
1820 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
1821 {
1822 if (TREE_CODE (field) != FIELD_DECL)
1823 continue;
1824
1825 if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
1826 || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
1827 && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
1828 && !(TYPE_SIZE (TREE_TYPE (field)) != 0
1829 && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
1830 || ! tree_fits_uhwi_p (bit_position (field))
1831 || DECL_SIZE (field) == 0
1832 || ! tree_fits_uhwi_p (DECL_SIZE (field)))
1833 return;
1834
1835 /* If this field is the whole struct, remember its mode so
1836 that, say, we can put a double in a class into a DF
1837 register instead of forcing it to live in the stack. */
1838 if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field))
1839 /* Partial int types (e.g. __int20) may have TYPE_SIZE equal to
1840 wider types (e.g. int32), despite precision being less. Ensure
1841 that the TYPE_MODE of the struct does not get set to the partial
1842 int mode if there is a wider type also in the struct. */
1843 && known_gt (GET_MODE_PRECISION (DECL_MODE (field)),
1844 GET_MODE_PRECISION (mode)))
1845 mode = DECL_MODE (field);
1846
1847 /* With some targets, it is sub-optimal to access an aligned
1848 BLKmode structure as a scalar. */
1849 if (targetm.member_type_forces_blk (field, mode))
1850 return;
1851 }
1852
1853   /* If we only have one real field, use its mode if that mode's size
1854 matches the type's size. This generally only applies to RECORD_TYPE.
1855 For UNION_TYPE, if the widest field is MODE_INT then use that mode.
1856 If the widest field is MODE_PARTIAL_INT, and the union will be passed
1857 by reference, then use that mode. */
1858 poly_uint64 type_size;
1859 if ((TREE_CODE (type) == RECORD_TYPE
1860 || (TREE_CODE (type) == UNION_TYPE
1861 && (GET_MODE_CLASS (mode) == MODE_INT
1862 || (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT
1863 && (targetm.calls.pass_by_reference
1864 (pack_cumulative_args (0),
1865 function_arg_info (type, mode, /*named=*/false)))))))
1866 && mode != VOIDmode
1867 && poly_int_tree_p (TYPE_SIZE (type), &type_size)
1868 && known_eq (GET_MODE_BITSIZE (mode), type_size))
1869 ;
1870 else
1871 mode = mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1).else_blk ();
1872
1873 /* If structure's known alignment is less than what the scalar
1874 mode would need, and it matters, then stick with BLKmode. */
1875 if (mode != BLKmode
1876 && STRICT_ALIGNMENT
1877 && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
1878 || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (mode)))
1879 {
1880 /* If this is the only reason this type is BLKmode, then
1881 don't force containing types to be BLKmode. */
1882 TYPE_NO_FORCE_BLK (type) = 1;
1883 mode = BLKmode;
1884 }
1885
1886 SET_TYPE_MODE (type, mode);
1887 }
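
/* Hypothetical examples of the mode selection above, assuming a typical
   64-bit target with a 32-bit int, a 64-bit double and DImode no wider
   than MAX_FIXED_MODE_SIZE:

     struct { double d; };	a single field spans the whole struct,
				so the struct gets the field's DFmode;
     struct { int a; int b; };	64 bits with no single field spanning the
				struct, so mode_for_size_tree yields DImode;
     struct { char c[3]; };	24 bits match no integer mode exactly,
				so the struct stays BLKmode.  */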
1888
1889 /* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
1890 out. */
1891
1892 static void
1893 finalize_type_size (tree type)
1894 {
1895 /* Normally, use the alignment corresponding to the mode chosen.
1896 However, where strict alignment is not required, avoid
1897 over-aligning structures, since most compilers do not do this
1898 alignment. */
1899 if (TYPE_MODE (type) != BLKmode
1900 && TYPE_MODE (type) != VOIDmode
1901 && (STRICT_ALIGNMENT || !AGGREGATE_TYPE_P (type)))
1902 {
1903 unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));
1904
1905 /* Don't override a larger alignment requirement coming from a user
1906 alignment of one of the fields. */
1907 if (mode_align >= TYPE_ALIGN (type))
1908 {
1909 SET_TYPE_ALIGN (type, mode_align);
1910 TYPE_USER_ALIGN (type) = 0;
1911 }
1912 }
1913
1914 /* Do machine-dependent extra alignment. */
1915 #ifdef ROUND_TYPE_ALIGN
1916 SET_TYPE_ALIGN (type,
1917 ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT));
1918 #endif
1919
1920 /* If we failed to find a simple way to calculate the unit size
1921 of the type, find it by division. */
1922 if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
1923 /* TYPE_SIZE (type) is computed in bitsizetype. After the division, the
1924 result will fit in sizetype. We will get more efficient code using
1925 sizetype, so we force a conversion. */
1926 TYPE_SIZE_UNIT (type)
1927 = fold_convert (sizetype,
1928 size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
1929 bitsize_unit_node));
1930
1931 if (TYPE_SIZE (type) != 0)
1932 {
1933 TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
1934 TYPE_SIZE_UNIT (type)
1935 = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type));
1936 }
1937
1938 /* Evaluate nonconstant sizes only once, either now or as soon as safe. */
1939 if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
1940 TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
1941 if (TYPE_SIZE_UNIT (type) != 0
1942 && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
1943 TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));
1944
1945 /* Handle empty records as per the x86-64 psABI. */
1946 TYPE_EMPTY_P (type) = targetm.calls.empty_record_p (type);
1947
1948 /* Also layout any other variants of the type. */
1949 if (TYPE_NEXT_VARIANT (type)
1950 || type != TYPE_MAIN_VARIANT (type))
1951 {
1952 tree variant;
1953 /* Record layout info of this variant. */
1954 tree size = TYPE_SIZE (type);
1955 tree size_unit = TYPE_SIZE_UNIT (type);
1956 unsigned int align = TYPE_ALIGN (type);
1957 unsigned int precision = TYPE_PRECISION (type);
1958 unsigned int user_align = TYPE_USER_ALIGN (type);
1959 machine_mode mode = TYPE_MODE (type);
1960 bool empty_p = TYPE_EMPTY_P (type);
1961
1962 /* Copy it into all variants. */
1963 for (variant = TYPE_MAIN_VARIANT (type);
1964 variant != 0;
1965 variant = TYPE_NEXT_VARIANT (variant))
1966 {
1967 TYPE_SIZE (variant) = size;
1968 TYPE_SIZE_UNIT (variant) = size_unit;
1969 unsigned valign = align;
1970 if (TYPE_USER_ALIGN (variant))
1971 valign = MAX (valign, TYPE_ALIGN (variant));
1972 else
1973 TYPE_USER_ALIGN (variant) = user_align;
1974 SET_TYPE_ALIGN (variant, valign);
1975 TYPE_PRECISION (variant) = precision;
1976 SET_TYPE_MODE (variant, mode);
1977 TYPE_EMPTY_P (variant) = empty_p;
1978 }
1979 }
1980 }
1981
1982 /* Return a new underlying object for a bitfield started with FIELD. */
1983
1984 static tree
1985 start_bitfield_representative (tree field)
1986 {
1987 tree repr = make_node (FIELD_DECL);
1988 DECL_FIELD_OFFSET (repr) = DECL_FIELD_OFFSET (field);
1989 /* Force the representative to begin at a BITS_PER_UNIT aligned
1990 boundary - C++ may use tail-padding of a base object to
1991 continue packing bits so the bitfield region does not start
1992 at bit zero (see g++.dg/abi/bitfield5.C for example).
1993 Unallocated bits may happen for other reasons as well,
1994 for example Ada which allows explicit bit-granular structure layout. */
1995 DECL_FIELD_BIT_OFFSET (repr)
1996 = size_binop (BIT_AND_EXPR,
1997 DECL_FIELD_BIT_OFFSET (field),
1998 bitsize_int (~(BITS_PER_UNIT - 1)));
1999 SET_DECL_OFFSET_ALIGN (repr, DECL_OFFSET_ALIGN (field));
2000 DECL_SIZE (repr) = DECL_SIZE (field);
2001 DECL_SIZE_UNIT (repr) = DECL_SIZE_UNIT (field);
2002 DECL_PACKED (repr) = DECL_PACKED (field);
2003 DECL_CONTEXT (repr) = DECL_CONTEXT (field);
2004 /* There are no indirect accesses to this field. If we introduce
2005 some then they have to use the record alias set. This makes
2006 sure to properly conflict with [indirect] accesses to addressable
2007 fields of the bitfield group. */
2008 DECL_NONADDRESSABLE_P (repr) = 1;
2009 return repr;
2010 }
2011
2012 /* Finish up a bitfield group that was started by creating the underlying
2013 object REPR with the last field in the bitfield group FIELD. */
2014
2015 static void
2016 finish_bitfield_representative (tree repr, tree field)
2017 {
2018 unsigned HOST_WIDE_INT bitsize, maxbitsize;
2019 tree nextf, size;
2020
2021 size = size_diffop (DECL_FIELD_OFFSET (field),
2022 DECL_FIELD_OFFSET (repr));
2023 while (TREE_CODE (size) == COMPOUND_EXPR)
2024 size = TREE_OPERAND (size, 1);
2025 gcc_assert (tree_fits_uhwi_p (size));
2026 bitsize = (tree_to_uhwi (size) * BITS_PER_UNIT
2027 + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))
2028 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))
2029 + tree_to_uhwi (DECL_SIZE (field)));
2030
2031 /* Round up bitsize to multiples of BITS_PER_UNIT. */
2032 bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
2033
2034 /* Now nothing tells us how to pad out bitsize ... */
2035 nextf = DECL_CHAIN (field);
2036 while (nextf && TREE_CODE (nextf) != FIELD_DECL)
2037 nextf = DECL_CHAIN (nextf);
2038 if (nextf)
2039 {
2040 tree maxsize;
2041 /* If there was an error, the field may be not laid out
2042 correctly. Don't bother to do anything. */
2043 if (TREE_TYPE (nextf) == error_mark_node)
2044 return;
2045 maxsize = size_diffop (DECL_FIELD_OFFSET (nextf),
2046 DECL_FIELD_OFFSET (repr));
2047 if (tree_fits_uhwi_p (maxsize))
2048 {
2049 maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT
2050 + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (nextf))
2051 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
2052 	  /* If the group ends within a bitfield, nextf does not need to be
2053 	     aligned to BITS_PER_UNIT; thus round up.  */
2054 maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1);
2055 }
2056 else
2057 maxbitsize = bitsize;
2058 }
2059 else
2060 {
2061 /* Note that if the C++ FE sets up tail-padding to be re-used it
2062 	 creates an as-base variant of the type with TYPE_SIZE adjusted
2063 accordingly. So it is safe to include tail-padding here. */
2064 tree aggsize = lang_hooks.types.unit_size_without_reusable_padding
2065 (DECL_CONTEXT (field));
2066 tree maxsize = size_diffop (aggsize, DECL_FIELD_OFFSET (repr));
2067 /* We cannot generally rely on maxsize to fold to an integer constant,
2068 so use bitsize as fallback for this case. */
2069 if (tree_fits_uhwi_p (maxsize))
2070 maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT
2071 - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)));
2072 else
2073 maxbitsize = bitsize;
2074 }
2075
2076   /* This holds because we never artificially break up a representative
2077      in the middle of a large bitfield with different, possibly
2078      overlapping representatives, and because every representative
2079      starts at a byte offset.  */
2080 gcc_assert (maxbitsize % BITS_PER_UNIT == 0);
2081
2082 /* Find the smallest nice mode to use. */
2083 opt_scalar_int_mode mode_iter;
2084 FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
2085 if (GET_MODE_BITSIZE (mode_iter.require ()) >= bitsize)
2086 break;
2087
2088 scalar_int_mode mode;
2089 if (!mode_iter.exists (&mode)
2090 || GET_MODE_BITSIZE (mode) > maxbitsize
2091 || GET_MODE_BITSIZE (mode) > MAX_FIXED_MODE_SIZE)
2092 {
2093 /* We really want a BLKmode representative only as a last resort,
2094 considering the member b in
2095 struct { int a : 7; int b : 17; int c; } __attribute__((packed));
2096 Otherwise we simply want to split the representative up
2097 allowing for overlaps within the bitfield region as required for
2098 struct { int a : 7; int b : 7;
2099 int c : 10; int d; } __attribute__((packed));
2100 [0, 15] HImode for a and b, [8, 23] HImode for c. */
2101 DECL_SIZE (repr) = bitsize_int (bitsize);
2102 DECL_SIZE_UNIT (repr) = size_int (bitsize / BITS_PER_UNIT);
2103 SET_DECL_MODE (repr, BLKmode);
2104 TREE_TYPE (repr) = build_array_type_nelts (unsigned_char_type_node,
2105 bitsize / BITS_PER_UNIT);
2106 }
2107 else
2108 {
2109 unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (mode);
2110 DECL_SIZE (repr) = bitsize_int (modesize);
2111 DECL_SIZE_UNIT (repr) = size_int (modesize / BITS_PER_UNIT);
2112 SET_DECL_MODE (repr, mode);
2113 TREE_TYPE (repr) = lang_hooks.types.type_for_mode (mode, 1);
2114 }
2115
2116 /* Remember whether the bitfield group is at the end of the
2117 structure or not. */
2118 DECL_CHAIN (repr) = nextf;
2119 }
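
/* A hypothetical example of the mode selection above, assuming a typical
   target with 8-bit bytes and a 32-bit int.  The type is not used
   anywhere.  */

struct stor_layout_repr_example
{
  int a : 7;	/* bits 0-6 */
  int b : 17;	/* bits 7-23 */
  int c;	/* bits 32-63 */
};
/* a and b share one representative: bitsize is 24 and maxbitsize is 32
   (c starts at bit 32), so the smallest suitable mode is SImode and the
   representative covers the first four bytes.  In the packed variant
   discussed in the comment above, c starts at bit 24, SImode no longer
   fits and the representative falls back to a 3-byte BLKmode array.  */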
2120
2121 /* Compute and set FIELD_DECLs for the underlying objects we should
2122 use for bitfield access for the structure T. */
2123
2124 void
2125 finish_bitfield_layout (tree t)
2126 {
2127 tree field, prev;
2128 tree repr = NULL_TREE;
2129
2130 /* Unions would be special: for the ease of type-punning optimizations
2131    we could use the underlying type as a hint for the representative,
2132    provided the bitfield would fit and the representative would not
2133    exceed the union in size.  */
2134 if (TREE_CODE (t) != RECORD_TYPE)
2135 return;
2136
2137 for (prev = NULL_TREE, field = TYPE_FIELDS (t);
2138 field; field = DECL_CHAIN (field))
2139 {
2140 if (TREE_CODE (field) != FIELD_DECL)
2141 continue;
2142
2143 /* In the C++ memory model, consecutive bit fields in a structure are
2144 considered one memory location and updating a memory location
2145 may not store into adjacent memory locations. */
2146 if (!repr
2147 && DECL_BIT_FIELD_TYPE (field))
2148 {
2149 /* Start new representative. */
2150 repr = start_bitfield_representative (field);
2151 }
2152 else if (repr
2153 && ! DECL_BIT_FIELD_TYPE (field))
2154 {
2155 /* Finish off new representative. */
2156 finish_bitfield_representative (repr, prev);
2157 repr = NULL_TREE;
2158 }
2159 else if (DECL_BIT_FIELD_TYPE (field))
2160 {
2161 gcc_assert (repr != NULL_TREE);
2162
2163 /* Zero-size bitfields finish off a representative and
2164 do not have a representative themselves. This is
2165 required by the C++ memory model. */
2166 if (integer_zerop (DECL_SIZE (field)))
2167 {
2168 finish_bitfield_representative (repr, prev);
2169 repr = NULL_TREE;
2170 }
2171
2172 /* We assume that either DECL_FIELD_OFFSET of the representative
2173 and each bitfield member is a constant or they are equal.
2174 This is because we need to be able to compute the bit-offset
2175 of each field relative to the representative in get_bit_range
2176 during RTL expansion.
2177 If these constraints are not met, simply force a new
2178 representative to be generated. That will at most
2179 generate worse code but still maintain correctness with
2180 respect to the C++ memory model. */
2181 else if (!((tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr))
2182 && tree_fits_uhwi_p (DECL_FIELD_OFFSET (field)))
2183 || operand_equal_p (DECL_FIELD_OFFSET (repr),
2184 DECL_FIELD_OFFSET (field), 0)))
2185 {
2186 finish_bitfield_representative (repr, prev);
2187 repr = start_bitfield_representative (field);
2188 }
2189 }
2190 else
2191 continue;
2192
2193 if (repr)
2194 DECL_BIT_FIELD_REPRESENTATIVE (field) = repr;
2195
2196 prev = field;
2197 }
2198
2199 if (repr)
2200 finish_bitfield_representative (repr, prev);
2201 }
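
/* A hypothetical example of the grouping rules above, assuming a typical
   target with 8-bit bytes and a 32-bit int.  The type is not used
   anywhere.  */

struct stor_layout_memory_location_example
{
  int a : 3;	/* bits 0-2 */
  int : 0;	/* ends the first memory location */
  int b : 3;	/* bits 32-34 */
};
/* The zero-width bitfield finishes a's representative and b starts a new
   one, so a and b are distinct memory locations: under the C++ memory
   model a store to b must not touch the bytes backing a.  */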
2202
2203 /* Do all of the work required to layout the type indicated by RLI,
2204 once the fields have been laid out. This function will call `free'
2205 for RLI, unless FREE_P is false. Passing a value other than false
2206 for FREE_P is bad practice; this option only exists to support the
2207 G++ 3.2 ABI. */
2208
2209 void
2210 finish_record_layout (record_layout_info rli, int free_p)
2211 {
2212 tree variant;
2213
2214 /* Compute the final size. */
2215 finalize_record_size (rli);
2216
2217 /* Compute the TYPE_MODE for the record. */
2218 compute_record_mode (rli->t);
2219
2220 /* Perform any last tweaks to the TYPE_SIZE, etc. */
2221 finalize_type_size (rli->t);
2222
2223 /* Compute bitfield representatives. */
2224 finish_bitfield_layout (rli->t);
2225
2226 /* Propagate TYPE_PACKED and TYPE_REVERSE_STORAGE_ORDER to variants.
2227 With C++ templates, it is too early to do this when the attribute
2228 is being parsed. */
2229 for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
2230 variant = TYPE_NEXT_VARIANT (variant))
2231 {
2232 TYPE_PACKED (variant) = TYPE_PACKED (rli->t);
2233 TYPE_REVERSE_STORAGE_ORDER (variant)
2234 = TYPE_REVERSE_STORAGE_ORDER (rli->t);
2235 }
2236
2237 /* Lay out any static members. This is done now because their type
2238 may use the record's type. */
2239 while (!vec_safe_is_empty (rli->pending_statics))
2240 layout_decl (rli->pending_statics->pop (), 0);
2241
2242 /* Clean up. */
2243 if (free_p)
2244 {
2245 vec_free (rli->pending_statics);
2246 free (rli);
2247 }
2248 }
2249 \f
2250
2251 /* Finish processing a builtin RECORD_TYPE type TYPE.  Its name is
2252    NAME; its fields are chained in reverse order on FIELDS.
2253
2254 If ALIGN_TYPE is non-null, it is given the same alignment as
2255 ALIGN_TYPE. */
2256
2257 void
2258 finish_builtin_struct (tree type, const char *name, tree fields,
2259 tree align_type)
2260 {
2261 tree tail, next;
2262
2263 for (tail = NULL_TREE; fields; tail = fields, fields = next)
2264 {
2265 DECL_FIELD_CONTEXT (fields) = type;
2266 next = DECL_CHAIN (fields);
2267 DECL_CHAIN (fields) = tail;
2268 }
2269 TYPE_FIELDS (type) = tail;
2270
2271 if (align_type)
2272 {
2273 SET_TYPE_ALIGN (type, TYPE_ALIGN (align_type));
2274 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
2275 SET_TYPE_WARN_IF_NOT_ALIGN (type,
2276 TYPE_WARN_IF_NOT_ALIGN (align_type));
2277 }
2278
2279 layout_type (type);
2280 #if 0 /* not yet, should get fixed properly later */
2281 TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
2282 #else
2283 TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
2284 TYPE_DECL, get_identifier (name), type);
2285 #endif
2286 TYPE_STUB_DECL (type) = TYPE_NAME (type);
2287 layout_decl (TYPE_NAME (type), 0);
2288 }
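
/* A minimal usage sketch (not enabled, and using hypothetical names): a
   front end or pass could build a record equivalent to
   struct { void *ptr; size_t len; } along these lines, chaining the
   FIELD_DECLs in reverse source order as expected above.  */
#if 0
static tree
build_hypothetical_pair_type (void)
{
  tree type = lang_hooks.types.make_type (RECORD_TYPE);
  tree ptr_field = build_decl (BUILTINS_LOCATION, FIELD_DECL,
			       get_identifier ("ptr"), ptr_type_node);
  tree len_field = build_decl (BUILTINS_LOCATION, FIELD_DECL,
			       get_identifier ("len"), size_type_node);

  /* Chain the fields in reverse: finish_builtin_struct reverses them, so
     the laid-out record has ptr first and len second.  */
  DECL_CHAIN (len_field) = ptr_field;
  finish_builtin_struct (type, "__hypothetical_pair", len_field, NULL_TREE);
  return type;
}
#endif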
2289
2290 /* Calculate the mode, size, and alignment for TYPE.
2291 For an array type, calculate the element separation as well.
2292 Record TYPE on the chain of permanent or temporary types
2293 so that dbxout will find out about it.
2294
2295 TYPE_SIZE of a type is nonzero if the type has been laid out already.
2296 layout_type does nothing on such a type.
2297
2298 If the type is incomplete, its TYPE_SIZE remains zero. */
2299
2300 void
2301 layout_type (tree type)
2302 {
2303 gcc_assert (type);
2304
2305 if (type == error_mark_node)
2306 return;
2307
2308 /* We don't want finalize_type_size to copy an alignment attribute to
2309 variants that don't have it. */
2310 type = TYPE_MAIN_VARIANT (type);
2311
2312 /* Do nothing if type has been laid out before. */
2313 if (TYPE_SIZE (type))
2314 return;
2315
2316 switch (TREE_CODE (type))
2317 {
2318 case LANG_TYPE:
2319 /* This kind of type is the responsibility
2320 of the language-specific code. */
2321 gcc_unreachable ();
2322
2323 case BOOLEAN_TYPE:
2324 case INTEGER_TYPE:
2325 case ENUMERAL_TYPE:
2326 {
2327 scalar_int_mode mode
2328 = smallest_int_mode_for_size (TYPE_PRECISION (type));
2329 SET_TYPE_MODE (type, mode);
2330 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2331 /* Don't set TYPE_PRECISION here, as it may be set by a bitfield. */
2332 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2333 break;
2334 }
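      /* For example, a hypothetical 7-bit INTEGER_TYPE (such as a front
	 end might create for a small range type) gets QImode here:
	 TYPE_SIZE becomes 8 and TYPE_SIZE_UNIT becomes 1, while
	 TYPE_PRECISION stays 7.  */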
2335
2336 case REAL_TYPE:
2337 {
2338 /* Allow the caller to choose the type mode, which is how decimal
2339 floats are distinguished from binary ones. */
2340 if (TYPE_MODE (type) == VOIDmode)
2341 SET_TYPE_MODE
2342 (type, float_mode_for_size (TYPE_PRECISION (type)).require ());
2343 scalar_float_mode mode = as_a <scalar_float_mode> (TYPE_MODE (type));
2344 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2345 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2346 break;
2347 }
2348
2349 case FIXED_POINT_TYPE:
2350 {
2351 /* TYPE_MODE (type) has been set already. */
2352 scalar_mode mode = SCALAR_TYPE_MODE (type);
2353 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2354 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2355 break;
2356 }
2357
2358 case COMPLEX_TYPE:
2359 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
2360 SET_TYPE_MODE (type,
2361 GET_MODE_COMPLEX_MODE (TYPE_MODE (TREE_TYPE (type))));
2362
2363 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
2364 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
2365 break;
2366
2367 case VECTOR_TYPE:
2368 {
2369 poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type);
2370 tree innertype = TREE_TYPE (type);
2371
2372 /* Find an appropriate mode for the vector type. */
2373 if (TYPE_MODE (type) == VOIDmode)
2374 SET_TYPE_MODE (type,
2375 mode_for_vector (SCALAR_TYPE_MODE (innertype),
2376 nunits).else_blk ());
2377
2378 TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
2379 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
2380 /* Several boolean vector elements may fit in a single unit. */
2381 if (VECTOR_BOOLEAN_TYPE_P (type)
2382 && type->type_common.mode != BLKmode)
2383 TYPE_SIZE_UNIT (type)
2384 = size_int (GET_MODE_SIZE (type->type_common.mode));
2385 else
2386 TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
2387 TYPE_SIZE_UNIT (innertype),
2388 size_int (nunits));
2389 TYPE_SIZE (type) = int_const_binop
2390 (MULT_EXPR,
2391 bits_from_bytes (TYPE_SIZE_UNIT (type)),
2392 bitsize_int (BITS_PER_UNIT));
2393
2394 /* For vector types, we do not default to the mode's alignment.
2395 Instead, query a target hook, defaulting to natural alignment.
2396 This prevents ABI changes depending on whether or not native
2397 vector modes are supported. */
2398 SET_TYPE_ALIGN (type, targetm.vector_alignment (type));
2399
2400 /* However, if the underlying mode requires a bigger alignment than
2401 what the target hook provides, we cannot use the mode. For now,
2402 simply reject that case. */
2403 gcc_assert (TYPE_ALIGN (type)
2404 >= GET_MODE_ALIGNMENT (TYPE_MODE (type)));
2405 break;
2406 }
2407
2408 case VOID_TYPE:
2409 /* This is an incomplete type and so doesn't have a size. */
2410 SET_TYPE_ALIGN (type, 1);
2411 TYPE_USER_ALIGN (type) = 0;
2412 SET_TYPE_MODE (type, VOIDmode);
2413 break;
2414
2415 case OFFSET_TYPE:
2416 TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
2417 TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE_UNITS);
2418 /* A pointer might be MODE_PARTIAL_INT, but ptrdiff_t must be
2419 integral, which may be an __intN. */
2420 SET_TYPE_MODE (type, int_mode_for_size (POINTER_SIZE, 0).require ());
2421 TYPE_PRECISION (type) = POINTER_SIZE;
2422 break;
2423
2424 case FUNCTION_TYPE:
2425 case METHOD_TYPE:
2426 /* It's hard to see what the mode and size of a function ought to
2427 be, but we do know the alignment is FUNCTION_BOUNDARY, so
2428 make it consistent with that. */
2429 SET_TYPE_MODE (type,
2430 int_mode_for_size (FUNCTION_BOUNDARY, 0).else_blk ());
2431 TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
2432 TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
2433 break;
2434
2435 case POINTER_TYPE:
2436 case REFERENCE_TYPE:
2437 {
2438 scalar_int_mode mode = SCALAR_INT_TYPE_MODE (type);
2439 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
2440 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
2441 TYPE_UNSIGNED (type) = 1;
2442 TYPE_PRECISION (type) = GET_MODE_PRECISION (mode);
2443 }
2444 break;
2445
2446 case ARRAY_TYPE:
2447 {
2448 tree index = TYPE_DOMAIN (type);
2449 tree element = TREE_TYPE (type);
2450
2451 /* We need to know both bounds in order to compute the size. */
2452 if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
2453 && TYPE_SIZE (element))
2454 {
2455 tree ub = TYPE_MAX_VALUE (index);
2456 tree lb = TYPE_MIN_VALUE (index);
2457 tree element_size = TYPE_SIZE (element);
2458 tree length;
2459
2460 	    /* Make sure that an array of zero-sized elements is zero-sized
2461 regardless of its extent. */
2462 if (integer_zerop (element_size))
2463 length = size_zero_node;
2464
2465 /* The computation should happen in the original signedness so
2466 that (possible) negative values are handled appropriately
2467 when determining overflow. */
2468 else
2469 {
2470 /* ??? When it is obvious that the range is signed
2471 represent it using ssizetype. */
2472 if (TREE_CODE (lb) == INTEGER_CST
2473 && TREE_CODE (ub) == INTEGER_CST
2474 && TYPE_UNSIGNED (TREE_TYPE (lb))
2475 && tree_int_cst_lt (ub, lb))
2476 {
2477 lb = wide_int_to_tree (ssizetype,
2478 offset_int::from (wi::to_wide (lb),
2479 SIGNED));
2480 ub = wide_int_to_tree (ssizetype,
2481 offset_int::from (wi::to_wide (ub),
2482 SIGNED));
2483 }
2484 length
2485 = fold_convert (sizetype,
2486 size_binop (PLUS_EXPR,
2487 build_int_cst (TREE_TYPE (lb), 1),
2488 size_binop (MINUS_EXPR, ub, lb)));
2489 }
2490
2491 /* ??? We have no way to distinguish a null-sized array from an
2492 array spanning the whole sizetype range, so we arbitrarily
2493 decide that [0, -1] is the only valid representation. */
2494 if (integer_zerop (length)
2495 && TREE_OVERFLOW (length)
2496 && integer_zerop (lb))
2497 length = size_zero_node;
2498
2499 TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
2500 bits_from_bytes (length));
2501
2502 /* If we know the size of the element, calculate the total size
2503 directly, rather than do some division thing below. This
2504 optimization helps Fortran assumed-size arrays (where the
2505 size of the array is determined at runtime) substantially. */
2506 if (TYPE_SIZE_UNIT (element))
2507 TYPE_SIZE_UNIT (type)
2508 = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
2509 }
2510
2511 /* Now round the alignment and size,
2512 using machine-dependent criteria if any. */
2513
2514 unsigned align = TYPE_ALIGN (element);
2515 if (TYPE_USER_ALIGN (type))
2516 align = MAX (align, TYPE_ALIGN (type));
2517 else
2518 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
2519 if (!TYPE_WARN_IF_NOT_ALIGN (type))
2520 SET_TYPE_WARN_IF_NOT_ALIGN (type,
2521 TYPE_WARN_IF_NOT_ALIGN (element));
2522 #ifdef ROUND_TYPE_ALIGN
2523 align = ROUND_TYPE_ALIGN (type, align, BITS_PER_UNIT);
2524 #else
2525 align = MAX (align, BITS_PER_UNIT);
2526 #endif
2527 SET_TYPE_ALIGN (type, align);
2528 SET_TYPE_MODE (type, BLKmode);
2529 if (TYPE_SIZE (type) != 0
2530 && ! targetm.member_type_forces_blk (type, VOIDmode)
2531 /* BLKmode elements force BLKmode aggregate;
2532 else extract/store fields may lose. */
2533 && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
2534 || TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
2535 {
2536 SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type),
2537 TYPE_SIZE (type)));
2538 if (TYPE_MODE (type) != BLKmode
2539 && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
2540 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
2541 {
2542 TYPE_NO_FORCE_BLK (type) = 1;
2543 SET_TYPE_MODE (type, BLKmode);
2544 }
2545 }
2546 if (AGGREGATE_TYPE_P (element))
2547 TYPE_TYPELESS_STORAGE (type) = TYPE_TYPELESS_STORAGE (element);
2548 /* When the element size is constant, check that it is at least as
2549 large as the element alignment. */
2550 if (TYPE_SIZE_UNIT (element)
2551 && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
2552 /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
2553 TYPE_ALIGN_UNIT. */
2554 && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
2555 && !integer_zerop (TYPE_SIZE_UNIT (element))
2556 && compare_tree_int (TYPE_SIZE_UNIT (element),
2557 TYPE_ALIGN_UNIT (element)) < 0)
2558 error ("alignment of array elements is greater than element size");
2559 break;
2560 }
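      /* Worked example: for a hypothetical int[10] with a 32-bit int and
	 an index domain of [0, 9], length is 10, so TYPE_SIZE becomes 320
	 bits and TYPE_SIZE_UNIT becomes 40 bytes, with TYPE_ALIGN taken
	 from the 32-bit element alignment.  */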
2561
2562 case RECORD_TYPE:
2563 case UNION_TYPE:
2564 case QUAL_UNION_TYPE:
2565 {
2566 tree field;
2567 record_layout_info rli;
2568
2569 /* Initialize the layout information. */
2570 rli = start_record_layout (type);
2571
2572 /* If this is a QUAL_UNION_TYPE, we want to process the fields
2573 in the reverse order in building the COND_EXPR that denotes
2574 its size. We reverse them again later. */
2575 if (TREE_CODE (type) == QUAL_UNION_TYPE)
2576 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2577
2578 /* Place all the fields. */
2579 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
2580 place_field (rli, field);
2581
2582 if (TREE_CODE (type) == QUAL_UNION_TYPE)
2583 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2584
2585 /* Finish laying out the record. */
2586 finish_record_layout (rli, /*free_p=*/true);
2587 }
2588 break;
2589
2590 default:
2591 gcc_unreachable ();
2592 }
2593
2594 /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For
2595 records and unions, finish_record_layout already called this
2596 function. */
2597 if (!RECORD_OR_UNION_TYPE_P (type))
2598 finalize_type_size (type);
2599
2600   /* We should never see alias sets on incomplete aggregates.  And we
2601      should not call layout_type on aggregates that are already laid out.  */
2602 if (AGGREGATE_TYPE_P (type))
2603 gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
2604 }
2605
2606 /* Return the least alignment required for type TYPE. */
2607
2608 unsigned int
2609 min_align_of_type (tree type)
2610 {
2611 unsigned int align = TYPE_ALIGN (type);
2612 if (!TYPE_USER_ALIGN (type))
2613 {
2614 align = MIN (align, BIGGEST_ALIGNMENT);
2615 #ifdef BIGGEST_FIELD_ALIGNMENT
2616 align = MIN (align, BIGGEST_FIELD_ALIGNMENT);
2617 #endif
2618 unsigned int field_align = align;
2619 #ifdef ADJUST_FIELD_ALIGN
2620 field_align = ADJUST_FIELD_ALIGN (NULL_TREE, type, field_align);
2621 #endif
2622 align = MIN (align, field_align);
2623 }
2624 return align / BITS_PER_UNIT;
2625 }
2626 \f
2627 /* Create and return a type for signed integers of PRECISION bits. */
2628
2629 tree
2630 make_signed_type (int precision)
2631 {
2632 tree type = make_node (INTEGER_TYPE);
2633
2634 TYPE_PRECISION (type) = precision;
2635
2636 fixup_signed_type (type);
2637 return type;
2638 }
2639
2640 /* Create and return a type for unsigned integers of PRECISION bits. */
2641
2642 tree
2643 make_unsigned_type (int precision)
2644 {
2645 tree type = make_node (INTEGER_TYPE);
2646
2647 TYPE_PRECISION (type) = precision;
2648
2649 fixup_unsigned_type (type);
2650 return type;
2651 }
2652 \f
2653 /* Create and return a type for fract of PRECISION bits, UNSIGNEDP,
2654 and SATP. */
2655
2656 tree
2657 make_fract_type (int precision, int unsignedp, int satp)
2658 {
2659 tree type = make_node (FIXED_POINT_TYPE);
2660
2661 TYPE_PRECISION (type) = precision;
2662
2663 if (satp)
2664 TYPE_SATURATING (type) = 1;
2665
2666 /* Lay out the type: set its alignment, size, etc. */
2667 TYPE_UNSIGNED (type) = unsignedp;
2668 enum mode_class mclass = unsignedp ? MODE_UFRACT : MODE_FRACT;
2669 SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ());
2670 layout_type (type);
2671
2672 return type;
2673 }
2674
2675 /* Create and return a type for accum of PRECISION bits, UNSIGNEDP,
2676 and SATP. */
2677
2678 tree
2679 make_accum_type (int precision, int unsignedp, int satp)
2680 {
2681 tree type = make_node (FIXED_POINT_TYPE);
2682
2683 TYPE_PRECISION (type) = precision;
2684
2685 if (satp)
2686 TYPE_SATURATING (type) = 1;
2687
2688 /* Lay out the type: set its alignment, size, etc. */
2689 TYPE_UNSIGNED (type) = unsignedp;
2690 enum mode_class mclass = unsignedp ? MODE_UACCUM : MODE_ACCUM;
2691 SET_TYPE_MODE (type, mode_for_size (precision, mclass, 0).require ());
2692 layout_type (type);
2693
2694 return type;
2695 }
2696
2697 /* Initialize sizetypes so layout_type can use them. */
2698
2699 void
2700 initialize_sizetypes (void)
2701 {
2702 int precision, bprecision;
2703
2704   /* Get the precision of the sizetypes from the SIZE_TYPE target macro.  */
2705 if (strcmp (SIZETYPE, "unsigned int") == 0)
2706 precision = INT_TYPE_SIZE;
2707 else if (strcmp (SIZETYPE, "long unsigned int") == 0)
2708 precision = LONG_TYPE_SIZE;
2709 else if (strcmp (SIZETYPE, "long long unsigned int") == 0)
2710 precision = LONG_LONG_TYPE_SIZE;
2711 else if (strcmp (SIZETYPE, "short unsigned int") == 0)
2712 precision = SHORT_TYPE_SIZE;
2713 else
2714 {
2715 int i;
2716
2717 precision = -1;
2718 for (i = 0; i < NUM_INT_N_ENTS; i++)
2719 if (int_n_enabled_p[i])
2720 {
2721 char name[50], altname[50];
2722 sprintf (name, "__int%d unsigned", int_n_data[i].bitsize);
2723 sprintf (altname, "__int%d__ unsigned", int_n_data[i].bitsize);
2724
2725 if (strcmp (name, SIZETYPE) == 0
2726 || strcmp (altname, SIZETYPE) == 0)
2727 {
2728 precision = int_n_data[i].bitsize;
2729 }
2730 }
2731 if (precision == -1)
2732 gcc_unreachable ();
2733 }
2734
2735 bprecision
2736 = MIN (precision + LOG2_BITS_PER_UNIT + 1, MAX_FIXED_MODE_SIZE);
2737 bprecision = GET_MODE_PRECISION (smallest_int_mode_for_size (bprecision));
2738 if (bprecision > HOST_BITS_PER_DOUBLE_INT)
2739 bprecision = HOST_BITS_PER_DOUBLE_INT;
2740
2741 /* Create stubs for sizetype and bitsizetype so we can create constants. */
2742 sizetype = make_node (INTEGER_TYPE);
2743 TYPE_NAME (sizetype) = get_identifier ("sizetype");
2744 TYPE_PRECISION (sizetype) = precision;
2745 TYPE_UNSIGNED (sizetype) = 1;
2746 bitsizetype = make_node (INTEGER_TYPE);
2747 TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype");
2748 TYPE_PRECISION (bitsizetype) = bprecision;
2749 TYPE_UNSIGNED (bitsizetype) = 1;
2750
2751 /* Now layout both types manually. */
2752 scalar_int_mode mode = smallest_int_mode_for_size (precision);
2753 SET_TYPE_MODE (sizetype, mode);
2754 SET_TYPE_ALIGN (sizetype, GET_MODE_ALIGNMENT (TYPE_MODE (sizetype)));
2755 TYPE_SIZE (sizetype) = bitsize_int (precision);
2756 TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (mode));
2757 set_min_and_max_values_for_integral_type (sizetype, precision, UNSIGNED);
2758
2759 mode = smallest_int_mode_for_size (bprecision);
2760 SET_TYPE_MODE (bitsizetype, mode);
2761 SET_TYPE_ALIGN (bitsizetype, GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype)));
2762 TYPE_SIZE (bitsizetype) = bitsize_int (bprecision);
2763 TYPE_SIZE_UNIT (bitsizetype) = size_int (GET_MODE_SIZE (mode));
2764 set_min_and_max_values_for_integral_type (bitsizetype, bprecision, UNSIGNED);
2765
2766 /* Create the signed variants of *sizetype. */
2767 ssizetype = make_signed_type (TYPE_PRECISION (sizetype));
2768 TYPE_NAME (ssizetype) = get_identifier ("ssizetype");
2769 sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype));
2770 TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype");
2771 }
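
/* For example, on a hypothetical LP64 target with 8-bit bytes where
   SIZETYPE is "long unsigned int", MAX_FIXED_MODE_SIZE is 128 and
   HOST_BITS_PER_DOUBLE_INT is 128: precision is 64, bprecision is
   MIN (64 + 3 + 1, 128) = 68 and is then widened to the 128-bit precision
   of the smallest integer mode that holds 68 bits, so sizetype ends up as
   a 64-bit unsigned type and bitsizetype as a 128-bit one.  */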
2772 \f
2773 /* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE
2774 or BOOLEAN_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
2775 for TYPE, based on the PRECISION and whether or not the TYPE
2776 IS_UNSIGNED. PRECISION need not correspond to a width supported
2777 natively by the hardware; for example, on a machine with 8-bit,
2778 16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
2779 61. */
2780
2781 void
2782 set_min_and_max_values_for_integral_type (tree type,
2783 int precision,
2784 signop sgn)
2785 {
2786 /* For bitfields with zero width we end up creating integer types
2787 with zero precision. Don't assign any minimum/maximum values
2788 to those types, they don't have any valid value. */
2789 if (precision < 1)
2790 return;
2791
2792 TYPE_MIN_VALUE (type)
2793 = wide_int_to_tree (type, wi::min_value (precision, sgn));
2794 TYPE_MAX_VALUE (type)
2795 = wide_int_to_tree (type, wi::max_value (precision, sgn));
2796 }
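
/* For example, with PRECISION == 7 the bounds are [-64, 63] when SGN is
   SIGNED and [0, 127] when SGN is UNSIGNED.  */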
2797
2798 /* Set the extreme values of TYPE based on its precision in bits,
2799 then lay it out. Used when make_signed_type won't do
2800 because the tree code is not INTEGER_TYPE. */
2801
2802 void
2803 fixup_signed_type (tree type)
2804 {
2805 int precision = TYPE_PRECISION (type);
2806
2807 set_min_and_max_values_for_integral_type (type, precision, SIGNED);
2808
2809 /* Lay out the type: set its alignment, size, etc. */
2810 layout_type (type);
2811 }
2812
2813 /* Set the extreme values of TYPE based on its precision in bits,
2814 then lay it out. This is used both in `make_unsigned_type'
2815 and for enumeral types. */
2816
2817 void
2818 fixup_unsigned_type (tree type)
2819 {
2820 int precision = TYPE_PRECISION (type);
2821
2822 TYPE_UNSIGNED (type) = 1;
2823
2824 set_min_and_max_values_for_integral_type (type, precision, UNSIGNED);
2825
2826 /* Lay out the type: set its alignment, size, etc. */
2827 layout_type (type);
2828 }
2829 \f
2830 /* Construct an iterator for a bitfield that spans BITSIZE bits,
2831 starting at BITPOS.
2832
2833 BITREGION_START is the bit position of the first bit in this
2834 sequence of bit fields. BITREGION_END is the last bit in this
2835 sequence. If these two fields are non-zero, we should restrict the
2836 memory access to that range. Otherwise, we are allowed to touch
2837 any adjacent non bit-fields.
2838
2839 ALIGN is the alignment of the underlying object in bits.
2840 VOLATILEP says whether the bitfield is volatile. */
2841
2842 bit_field_mode_iterator
2843 ::bit_field_mode_iterator (HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos,
2844 poly_int64 bitregion_start,
2845 poly_int64 bitregion_end,
2846 unsigned int align, bool volatilep)
2847 : m_mode (NARROWEST_INT_MODE), m_bitsize (bitsize),
2848 m_bitpos (bitpos), m_bitregion_start (bitregion_start),
2849 m_bitregion_end (bitregion_end), m_align (align),
2850 m_volatilep (volatilep), m_count (0)
2851 {
2852 if (known_eq (m_bitregion_end, 0))
2853 {
2854 /* We can assume that any aligned chunk of ALIGN bits that overlaps
2855 the bitfield is mapped and won't trap, provided that ALIGN isn't
2856 too large. The cap is the biggest required alignment for data,
2857 or at least the word size. And force one such chunk at least. */
2858 unsigned HOST_WIDE_INT units
2859 = MIN (align, MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD));
2860 if (bitsize <= 0)
2861 bitsize = 1;
2862 HOST_WIDE_INT end = bitpos + bitsize + units - 1;
2863 m_bitregion_end = end - end % units - 1;
2864 }
2865 }
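
/* For example, with BITSIZE == 5, BITPOS == 17, ALIGN == 32 and no bit
   region given, units is 32 on typical targets, end is 17 + 5 + 32 - 1 = 53
   and m_bitregion_end becomes 31: the iterator may use any mode contained
   in the aligned 32-bit chunk holding the bitfield, but nothing beyond it.  */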
2866
2867 /* Calls to this function return successively larger modes that can be used
2868 to represent the bitfield. Return true if another bitfield mode is
2869 available, storing it in *OUT_MODE if so. */
2870
2871 bool
2872 bit_field_mode_iterator::next_mode (scalar_int_mode *out_mode)
2873 {
2874 scalar_int_mode mode;
2875 for (; m_mode.exists (&mode); m_mode = GET_MODE_WIDER_MODE (mode))
2876 {
2877 unsigned int unit = GET_MODE_BITSIZE (mode);
2878
2879 /* Skip modes that don't have full precision. */
2880 if (unit != GET_MODE_PRECISION (mode))
2881 continue;
2882
2883 /* Stop if the mode is too wide to handle efficiently. */
2884 if (unit > MAX_FIXED_MODE_SIZE)
2885 break;
2886
2887 /* Don't deliver more than one multiword mode; the smallest one
2888 should be used. */
2889 if (m_count > 0 && unit > BITS_PER_WORD)
2890 break;
2891
2892 /* Skip modes that are too small. */
2893 unsigned HOST_WIDE_INT substart = (unsigned HOST_WIDE_INT) m_bitpos % unit;
2894 unsigned HOST_WIDE_INT subend = substart + m_bitsize;
2895 if (subend > unit)
2896 continue;
2897
2898 /* Stop if the mode goes outside the bitregion. */
2899 HOST_WIDE_INT start = m_bitpos - substart;
2900 if (maybe_ne (m_bitregion_start, 0)
2901 && maybe_lt (start, m_bitregion_start))
2902 break;
2903 HOST_WIDE_INT end = start + unit;
2904 if (maybe_gt (end, m_bitregion_end + 1))
2905 break;
2906
2907 /* Stop if the mode requires too much alignment. */
2908 if (GET_MODE_ALIGNMENT (mode) > m_align
2909 && targetm.slow_unaligned_access (mode, m_align))
2910 break;
2911
2912 *out_mode = mode;
2913 m_mode = GET_MODE_WIDER_MODE (mode);
2914 m_count++;
2915 return true;
2916 }
2917 return false;
2918 }
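
/* Continuing the example above (BITSIZE == 5, BITPOS == 17, a bit region
   ending at bit 31 and 32-bit alignment), successive calls would yield
   QImode (the byte holding bits 16-23), then HImode (bits 16-31), then
   SImode (bits 0-31); DImode is rejected because it would reach past the
   end of the bit region.  get_best_mode below then returns QImode when
   smaller modes are preferred and SImode otherwise.  */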
2919
2920 /* Return true if smaller modes are generally preferred for this kind
2921 of bitfield. */
2922
2923 bool
2924 bit_field_mode_iterator::prefer_smaller_modes ()
2925 {
2926 return (m_volatilep
2927 ? targetm.narrow_volatile_bitfield ()
2928 : !SLOW_BYTE_ACCESS);
2929 }
2930
2931 /* Find the best machine mode to use when referencing a bit field of length
2932 BITSIZE bits starting at BITPOS.
2933
2934 BITREGION_START is the bit position of the first bit in this
2935 sequence of bit fields. BITREGION_END is the last bit in this
2936 sequence. If these two fields are non-zero, we should restrict the
2937 memory access to that range. Otherwise, we are allowed to touch
2938 any adjacent non bit-fields.
2939
2940 The chosen mode must have no more than LARGEST_MODE_BITSIZE bits.
2941 INT_MAX is a suitable value for LARGEST_MODE_BITSIZE if the caller
2942 doesn't want to apply a specific limit.
2943
2944 If no mode meets all these conditions, we return VOIDmode.
2945
2946 The underlying object is known to be aligned to a boundary of ALIGN bits.
2947
2948 If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
2949 smallest mode meeting these conditions.
2950
2951 If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
2952 largest mode (but a mode no wider than UNITS_PER_WORD) that meets
2953 all the conditions.
2954
2955    If VOLATILEP is true the narrow_volatile_bitfield target hook is used to
2956 decide which of the above modes should be used. */
2957
2958 bool
2959 get_best_mode (int bitsize, int bitpos,
2960 poly_uint64 bitregion_start, poly_uint64 bitregion_end,
2961 unsigned int align,
2962 unsigned HOST_WIDE_INT largest_mode_bitsize, bool volatilep,
2963 scalar_int_mode *best_mode)
2964 {
2965 bit_field_mode_iterator iter (bitsize, bitpos, bitregion_start,
2966 bitregion_end, align, volatilep);
2967 scalar_int_mode mode;
2968 bool found = false;
2969 while (iter.next_mode (&mode)
2970 /* ??? For historical reasons, reject modes that would normally
2971 receive greater alignment, even if unaligned accesses are
2972 acceptable. This has both advantages and disadvantages.
2973 Removing this check means that something like:
2974
2975 struct s { unsigned int x; unsigned int y; };
2976 int f (struct s *s) { return s->x == 0 && s->y == 0; }
2977
2978 can be implemented using a single load and compare on
2979 64-bit machines that have no alignment restrictions.
2980 For example, on powerpc64-linux-gnu, we would generate:
2981
2982 ld 3,0(3)
2983 cntlzd 3,3
2984 srdi 3,3,6
2985 blr
2986
2987 rather than:
2988
2989 lwz 9,0(3)
2990 cmpwi 7,9,0
2991 bne 7,.L3
2992 lwz 3,4(3)
2993 cntlzw 3,3
2994 srwi 3,3,5
2995 extsw 3,3
2996 blr
2997 .p2align 4,,15
2998 .L3:
2999 li 3,0
3000 blr
3001
3002 However, accessing more than one field can make life harder
3003 for the gimple optimizers. For example, gcc.dg/vect/bb-slp-5.c
3004 has a series of unsigned short copies followed by a series of
3005 unsigned short comparisons. With this check, both the copies
3006 and comparisons remain 16-bit accesses and FRE is able
3007 to eliminate the latter. Without the check, the comparisons
3008 can be done using 2 64-bit operations, which FRE isn't able
3009 to handle in the same way.
3010
3011 Either way, it would probably be worth disabling this check
3012 during expand. One particular example where removing the
3013 check would help is the get_best_mode call in store_bit_field.
3014 If we are given a memory bitregion of 128 bits that is aligned
3015 to a 64-bit boundary, and the bitfield we want to modify is
3016 in the second half of the bitregion, this check causes
3017 store_bitfield to turn the memory into a 64-bit reference
3018 to the _first_ half of the region. We later use
3019 adjust_bitfield_address to get a reference to the correct half,
3020 but doing so looks to adjust_bitfield_address as though we are
3021 moving past the end of the original object, so it drops the
3022 associated MEM_EXPR and MEM_OFFSET. Removing the check
3023 causes store_bit_field to keep a 128-bit memory reference,
3024 so that the final bitfield reference still has a MEM_EXPR
3025 and MEM_OFFSET. */
3026 && GET_MODE_ALIGNMENT (mode) <= align
3027 && GET_MODE_BITSIZE (mode) <= largest_mode_bitsize)
3028 {
3029 *best_mode = mode;
3030 found = true;
3031 if (iter.prefer_smaller_modes ())
3032 break;
3033 }
3034
3035 return found;
3036 }
3037
3038 /* Gets minimal and maximal values for MODE (signed or unsigned depending on
3039 SIGN). The returned constants are made to be usable in TARGET_MODE. */
3040
3041 void
3042 get_mode_bounds (scalar_int_mode mode, int sign,
3043 scalar_int_mode target_mode,
3044 rtx *mmin, rtx *mmax)
3045 {
3046 unsigned size = GET_MODE_PRECISION (mode);
3047 unsigned HOST_WIDE_INT min_val, max_val;
3048
3049 gcc_assert (size <= HOST_BITS_PER_WIDE_INT);
3050
3051 /* Special case BImode, which has values 0 and STORE_FLAG_VALUE. */
3052 if (mode == BImode)
3053 {
3054 if (STORE_FLAG_VALUE < 0)
3055 {
3056 min_val = STORE_FLAG_VALUE;
3057 max_val = 0;
3058 }
3059 else
3060 {
3061 min_val = 0;
3062 max_val = STORE_FLAG_VALUE;
3063 }
3064 }
3065 else if (sign)
3066 {
3067 min_val = -(HOST_WIDE_INT_1U << (size - 1));
3068 max_val = (HOST_WIDE_INT_1U << (size - 1)) - 1;
3069 }
3070 else
3071 {
3072 min_val = 0;
3073 max_val = (HOST_WIDE_INT_1U << (size - 1) << 1) - 1;
3074 }
3075
3076 *mmin = gen_int_mode (min_val, target_mode);
3077 *mmax = gen_int_mode (max_val, target_mode);
3078 }
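
/* For example, get_mode_bounds for 8-bit QImode yields [-128, 127] when
   SIGN is nonzero and [0, 255] otherwise; BImode yields
   [0, STORE_FLAG_VALUE], or [STORE_FLAG_VALUE, 0] if STORE_FLAG_VALUE is
   negative.  */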
3079
3080 #include "gt-stor-layout.h"