Commit | Line | Data |
---|---|---|
f2cfea4a | 1 | /* C-compiler utilities for types and variables storage layout |
3aea1f79 | 2 | Copyright (C) 1987-2014 Free Software Foundation, Inc. |
f2cfea4a | 3 | |
f12b58b3 | 4 | This file is part of GCC. |
f2cfea4a | 5 | |
f12b58b3 | 6 | GCC is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free | |
8c4c00c1 | 8 | Software Foundation; either version 3, or (at your option) any later |
f12b58b3 | 9 | version. |
f2cfea4a | 10 | |
f12b58b3 | 11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
14 | for more details. | |
f2cfea4a | 15 | |
16 | You should have received a copy of the GNU General Public License | |
8c4c00c1 | 17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ | |
f2cfea4a | 19 | |
20 | ||
21 | #include "config.h" | |
405711de | 22 | #include "system.h" |
805e22b2 | 23 | #include "coretypes.h" |
24 | #include "tm.h" | |
f2cfea4a | 25 | #include "tree.h" |
9ed99284 | 26 | #include "stor-layout.h" |
27 | #include "stringpool.h" | |
28 | #include "varasm.h" | |
29 | #include "print-tree.h" | |
bd3b1088 | 30 | #include "rtl.h" |
7953c610 | 31 | #include "tm_p.h" |
a6a1ab64 | 32 | #include "flags.h" |
f2cfea4a | 33 | #include "function.h" |
c091e5a4 | 34 | #include "expr.h" |
0b205f4c | 35 | #include "diagnostic-core.h" |
f04f096b | 36 | #include "target.h" |
20325f61 | 37 | #include "langhooks.h" |
83e2a11b | 38 | #include "regs.h" |
00b76131 | 39 | #include "params.h" |
4189e677 | 40 | #include "cgraph.h" |
41 | #include "tree-inline.h" | |
42 | #include "tree-dump.h" | |
a8783bee | 43 | #include "gimplify.h" |
f2cfea4a | 44 | |
f2cfea4a | 45 | /* Data type for the expressions representing sizes of data types. |
a32bb500 | 46 | It is the first integer type laid out. */ |
748e5d45 | 47 | tree sizetype_tab[(int) stk_type_kind_last]; |
f2cfea4a | 48 | |
cdb11de0 | 49 | /* If nonzero, this is an upper limit on alignment of structure fields. |
50 | The value is measured in bits. */ | |
6b5553e5 | 51 | unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT; |
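/* For illustration (not part of the original source): a front end handling
   something like "#pragma pack (2)" or the -fpack-struct=2 option would
   typically set this variable to 2 * BITS_PER_UNIT, i.e. 16 bits, so that no
   structure field is given more than 2-byte alignment from that point on.  */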
cdb11de0 | 52 | |
98155838 | 53 | /* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated |
54 | in the address spaces' address_mode, not pointer_mode. Set only by | |
55 | internal_reference_types, which is called only by a front end. */ |
f1986931 | 56 | static int reference_types_internal = 0; |
57 | ||
4189e677 | 58 | static tree self_referential_size (tree); |
60b8c5b3 | 59 | static void finalize_record_size (record_layout_info); |
60 | static void finalize_type_size (tree); | |
61 | static void place_union_field (record_layout_info, tree); | |
9a27f26f | 62 | #if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED) |
60b8c5b3 | 63 | static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT, |
64 | HOST_WIDE_INT, tree); | |
9a27f26f | 65 | #endif |
60b8c5b3 | 66 | extern void debug_rli (record_layout_info); |
f2cfea4a | 67 | \f |
98155838 | 68 | /* Show that REFERENCE_TYPES are internal and should use address_mode. |
69 | Called only by front end. */ | |
f1986931 | 70 | |
71 | void | |
60b8c5b3 | 72 | internal_reference_types (void) |
f1986931 | 73 | { |
74 | reference_types_internal = 1; | |
75 | } | |
76 | ||
09138ab1 | 77 | /* Given a size SIZE that may not be a constant, return a SAVE_EXPR |
f2cfea4a | 78 | to serve as the actual size-expression for a type or decl. */ |
79 | ||
7b778713 | 80 | tree |
60b8c5b3 | 81 | variable_size (tree size) |
f2cfea4a | 82 | { |
4189e677 | 83 | /* A constant size needs no special handling. */ |
84 | if (TREE_CONSTANT (size)) | |
85 | return size; | |
86 | ||
87 | /* If the size is self-referential, we can't make a SAVE_EXPR (see | |
88 | save_expr for the rationale). But we can do something else. */ | |
89 | if (CONTAINS_PLACEHOLDER_P (size)) | |
90 | return self_referential_size (size); | |
91 | ||
1d2bb655 | 92 | /* If we are in the global binding level, we can't make a SAVE_EXPR |
93 | since it may end up being shared across functions, so it is up | |
94 | to the front-end to deal with this case. */ | |
95 | if (lang_hooks.decls.global_bindings_p ()) | |
034b6c60 | 96 | return size; |
97 | ||
a5aff672 | 98 | return save_expr (size); |
f2cfea4a | 99 | } |
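/* A rough illustration of why the SAVE_EXPR matters (hypothetical C
   front-end scenario with a variably modified type):

     void f (int n) { char buf[n * 2]; ... }

   The byte-size expression "n * 2" is not constant, so variable_size wraps
   it in a SAVE_EXPR; the product is then evaluated once and the cached value
   is reused everywhere the size of buf is needed.  */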
4189e677 | 100 | |
101 | /* An array of functions used for self-referential size computation. */ | |
f1f41a6c | 102 | static GTY(()) vec<tree, va_gc> *size_functions; |
4189e677 | 103 | |
104 | /* Similar to copy_tree_r but do not copy component references involving | |
105 | PLACEHOLDER_EXPRs. These nodes are spotted in find_placeholder_in_expr | |
106 | and substituted in substitute_in_expr. */ | |
107 | ||
108 | static tree | |
109 | copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data) | |
110 | { | |
111 | enum tree_code code = TREE_CODE (*tp); | |
112 | ||
113 | /* Stop at types, decls, constants like copy_tree_r. */ | |
114 | if (TREE_CODE_CLASS (code) == tcc_type | |
115 | || TREE_CODE_CLASS (code) == tcc_declaration | |
116 | || TREE_CODE_CLASS (code) == tcc_constant) | |
117 | { | |
118 | *walk_subtrees = 0; | |
119 | return NULL_TREE; | |
120 | } | |
121 | ||
122 | /* This is the pattern built in ada/make_aligning_type. */ | |
123 | else if (code == ADDR_EXPR | |
124 | && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR) | |
125 | { | |
126 | *walk_subtrees = 0; | |
127 | return NULL_TREE; | |
128 | } | |
129 | ||
130 | /* Default case: the component reference. */ | |
131 | else if (code == COMPONENT_REF) | |
132 | { | |
133 | tree inner; | |
134 | for (inner = TREE_OPERAND (*tp, 0); | |
135 | REFERENCE_CLASS_P (inner); | |
136 | inner = TREE_OPERAND (inner, 0)) | |
137 | ; | |
138 | ||
139 | if (TREE_CODE (inner) == PLACEHOLDER_EXPR) | |
140 | { | |
141 | *walk_subtrees = 0; | |
142 | return NULL_TREE; | |
143 | } | |
144 | } | |
145 | ||
146 | /* We're not supposed to have them in self-referential size trees | |
147 | because we wouldn't properly control when they are evaluated. | |
148 | However, not creating superfluous SAVE_EXPRs requires accurate | |
149 | tracking of readonly-ness all the way down to here, which we | |
150 | cannot always guarantee in practice. So punt in this case. */ | |
151 | else if (code == SAVE_EXPR) | |
152 | return error_mark_node; | |
153 | ||
17476aac | 154 | else if (code == STATEMENT_LIST) |
155 | gcc_unreachable (); | |
156 | ||
4189e677 | 157 | return copy_tree_r (tp, walk_subtrees, data); |
158 | } | |
159 | ||
160 | /* Given a SIZE expression that is self-referential, return an equivalent | |
161 | expression to serve as the actual size expression for a type. */ | |
162 | ||
163 | static tree | |
164 | self_referential_size (tree size) | |
165 | { | |
166 | static unsigned HOST_WIDE_INT fnno = 0; | |
1e094109 | 167 | vec<tree> self_refs = vNULL; |
414c3a2c | 168 | tree param_type_list = NULL, param_decl_list = NULL; |
4189e677 | 169 | tree t, ref, return_type, fntype, fnname, fndecl; |
170 | unsigned int i; | |
171 | char buf[128]; | |
f1f41a6c | 172 | vec<tree, va_gc> *args = NULL; |
4189e677 | 173 | |
174 | /* Do not factor out simple operations. */ | |
85cea2e3 | 175 | t = skip_simple_constant_arithmetic (size); |
4189e677 | 176 | if (TREE_CODE (t) == CALL_EXPR) |
177 | return size; | |
178 | ||
179 | /* Collect the list of self-references in the expression. */ | |
180 | find_placeholder_in_expr (size, &self_refs); | |
f1f41a6c | 181 | gcc_assert (self_refs.length () > 0); |
4189e677 | 182 | |
183 | /* Obtain a private copy of the expression. */ | |
184 | t = size; | |
185 | if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE) | |
186 | return size; | |
187 | size = t; | |
188 | ||
189 | /* Build the parameter and argument lists in parallel; also | |
190 | substitute the former for the latter in the expression. */ | |
f1f41a6c | 191 | vec_alloc (args, self_refs.length ()); |
192 | FOR_EACH_VEC_ELT (self_refs, i, ref) | |
4189e677 | 193 | { |
194 | tree subst, param_name, param_type, param_decl; | |
195 | ||
196 | if (DECL_P (ref)) | |
197 | { | |
198 | /* We shouldn't have true variables here. */ | |
199 | gcc_assert (TREE_READONLY (ref)); | |
200 | subst = ref; | |
201 | } | |
202 | /* This is the pattern built in ada/make_aligning_type. */ | |
203 | else if (TREE_CODE (ref) == ADDR_EXPR) | |
204 | subst = ref; | |
205 | /* Default case: the component reference. */ | |
206 | else | |
207 | subst = TREE_OPERAND (ref, 1); | |
208 | ||
209 | sprintf (buf, "p%d", i); | |
210 | param_name = get_identifier (buf); | |
211 | param_type = TREE_TYPE (ref); | |
212 | param_decl | |
213 | = build_decl (input_location, PARM_DECL, param_name, param_type); | |
214 | if (targetm.calls.promote_prototypes (NULL_TREE) | |
215 | && INTEGRAL_TYPE_P (param_type) | |
216 | && TYPE_PRECISION (param_type) < TYPE_PRECISION (integer_type_node)) | |
217 | DECL_ARG_TYPE (param_decl) = integer_type_node; | |
218 | else | |
219 | DECL_ARG_TYPE (param_decl) = param_type; | |
220 | DECL_ARTIFICIAL (param_decl) = 1; | |
221 | TREE_READONLY (param_decl) = 1; | |
222 | ||
223 | size = substitute_in_expr (size, subst, param_decl); | |
224 | ||
225 | param_type_list = tree_cons (NULL_TREE, param_type, param_type_list); | |
226 | param_decl_list = chainon (param_decl, param_decl_list); | |
f1f41a6c | 227 | args->quick_push (ref); |
4189e677 | 228 | } |
229 | ||
f1f41a6c | 230 | self_refs.release (); |
4189e677 | 231 | |
232 | /* Append 'void' to indicate that the number of parameters is fixed. */ | |
233 | param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list); | |
234 | ||
235 | /* The two parameter lists have been created in reverse order. */ |
236 | param_type_list = nreverse (param_type_list); | |
237 | param_decl_list = nreverse (param_decl_list); | |
4189e677 | 238 | |
239 | /* Build the function type. */ | |
240 | return_type = TREE_TYPE (size); | |
241 | fntype = build_function_type (return_type, param_type_list); | |
242 | ||
243 | /* Build the function declaration. */ | |
244 | sprintf (buf, "SZ"HOST_WIDE_INT_PRINT_UNSIGNED, fnno++); | |
245 | fnname = get_file_function_name (buf); | |
246 | fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype); | |
1767a056 | 247 | for (t = param_decl_list; t; t = DECL_CHAIN (t)) |
4189e677 | 248 | DECL_CONTEXT (t) = fndecl; |
249 | DECL_ARGUMENTS (fndecl) = param_decl_list; | |
250 | DECL_RESULT (fndecl) | |
251 | = build_decl (input_location, RESULT_DECL, 0, return_type); | |
252 | DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl; | |
253 | ||
254 | /* The function has been created by the compiler and we don't | |
255 | want to emit debug info for it. */ | |
256 | DECL_ARTIFICIAL (fndecl) = 1; | |
257 | DECL_IGNORED_P (fndecl) = 1; | |
258 | ||
259 | /* It is supposed to be "const" and never throw. */ | |
260 | TREE_READONLY (fndecl) = 1; | |
261 | TREE_NOTHROW (fndecl) = 1; | |
262 | ||
263 | /* We want it to be inlined when this is deemed profitable, as | |
264 | well as discarded if every call has been integrated. */ | |
265 | DECL_DECLARED_INLINE_P (fndecl) = 1; | |
266 | ||
267 | /* It is made up of a unique return statement. */ | |
268 | DECL_INITIAL (fndecl) = make_node (BLOCK); | |
269 | BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl; | |
270 | t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size); | |
271 | DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t); | |
272 | TREE_STATIC (fndecl) = 1; | |
273 | ||
274 | /* Put it onto the list of size functions. */ | |
f1f41a6c | 275 | vec_safe_push (size_functions, fndecl); |
4189e677 | 276 | |
277 | /* Replace the original expression with a call to the size function. */ | |
5cca4f1d | 278 | return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args); |
4189e677 | 279 | } |
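/* Schematic illustration (not actual compiler output): if a type's size
   expression refers back to the object being laid out through a
   PLACEHOLDER_EXPR, e.g. something morally equivalent to

     size = <PLACEHOLDER>.n * BITS_PER_UNIT

   the routine above emits an artificial function, roughly "SZ0 (p0)"
   returning "p0 * BITS_PER_UNIT", and replaces the size expression with a
   call whose argument is the self-reference; the call can later be inlined
   back at each use.  */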
280 | ||
281 | /* Take, queue and compile all the size functions. It is essential that | |
282 | the size functions be gimplified at the very end of the compilation | |
283 | in order to guarantee transparent handling of self-referential sizes. | |
284 | Otherwise the GENERIC inliner would not be able to inline them back | |
285 | at each of their call sites, thus creating artificial non-constant | |
286 | size expressions which would trigger nasty problems later on. */ | |
287 | ||
288 | void | |
289 | finalize_size_functions (void) | |
290 | { | |
291 | unsigned int i; | |
292 | tree fndecl; | |
293 | ||
f1f41a6c | 294 | for (i = 0; size_functions && size_functions->iterate (i, &fndecl); i++) |
4189e677 | 295 | { |
c6cfb282 | 296 | allocate_struct_function (fndecl, false); |
297 | set_cfun (NULL); | |
4189e677 | 298 | dump_function (TDI_original, fndecl); |
299 | gimplify_function_tree (fndecl); | |
300 | dump_function (TDI_generic, fndecl); | |
301 | cgraph_finalize_function (fndecl, false); | |
302 | } | |
303 | ||
f1f41a6c | 304 | vec_free (size_functions); |
4189e677 | 305 | } |
f2cfea4a | 306 | \f |
7c0390e7 | 307 | /* Return the machine mode to use for a nonscalar of SIZE bits. The |
47cfb7f4 | 308 | mode must be in class MCLASS, and have exactly that many value bits; |
7c0390e7 | 309 | it may have padding as well. If LIMIT is nonzero, modes wider |
310 | than MAX_FIXED_MODE_SIZE will not be used. */ | |
f2cfea4a | 311 | |
312 | enum machine_mode | |
47cfb7f4 | 313 | mode_for_size (unsigned int size, enum mode_class mclass, int limit) |
f2cfea4a | 314 | { |
19cb6b50 | 315 | enum machine_mode mode; |
f2cfea4a | 316 | |
0fc6aef1 | 317 | if (limit && size > MAX_FIXED_MODE_SIZE) |
f2cfea4a | 318 | return BLKmode; |
319 | ||
034b6c60 | 320 | /* Get the first mode which has this size, in the specified class. */ |
47cfb7f4 | 321 | for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode; |
f2cfea4a | 322 | mode = GET_MODE_WIDER_MODE (mode)) |
7c0390e7 | 323 | if (GET_MODE_PRECISION (mode) == size) |
f2cfea4a | 324 | return mode; |
325 | ||
326 | return BLKmode; | |
327 | } | |
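/* Worked example (assuming the usual 8/16/32/64-bit integer modes):
   mode_for_size (32, MODE_INT, 1) walks QImode, HImode, SImode and returns
   SImode, the first mode with exactly 32 value bits.  A request for 24 bits
   finds no exact match and yields BLKmode.  */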
328 | ||
0fc6aef1 | 329 | /* Similar, except passed a tree node. */ |
330 | ||
331 | enum machine_mode | |
47cfb7f4 | 332 | mode_for_size_tree (const_tree size, enum mode_class mclass, int limit) |
0fc6aef1 | 333 | { |
1088cbc4 | 334 | unsigned HOST_WIDE_INT uhwi; |
335 | unsigned int ui; | |
336 | ||
e913b5cd | 337 | if (!tree_fits_uhwi_p (size)) |
0fc6aef1 | 338 | return BLKmode; |
e913b5cd | 339 | uhwi = tree_to_uhwi (size); |
1088cbc4 | 340 | ui = uhwi; |
341 | if (uhwi != ui) | |
342 | return BLKmode; | |
47cfb7f4 | 343 | return mode_for_size (ui, mclass, limit); |
0fc6aef1 | 344 | } |
345 | ||
034b6c60 | 346 | /* Similar, but never return BLKmode; return the narrowest mode that |
7c0390e7 | 347 | contains at least the requested number of value bits. */ |
034b6c60 | 348 | |
bbceecb6 | 349 | enum machine_mode |
47cfb7f4 | 350 | smallest_mode_for_size (unsigned int size, enum mode_class mclass) |
034b6c60 | 351 | { |
19cb6b50 | 352 | enum machine_mode mode; |
034b6c60 | 353 | |
354 | /* Get the first mode which has at least this size, in the | |
355 | specified class. */ | |
47cfb7f4 | 356 | for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode; |
034b6c60 | 357 | mode = GET_MODE_WIDER_MODE (mode)) |
7c0390e7 | 358 | if (GET_MODE_PRECISION (mode) >= size) |
034b6c60 | 359 | return mode; |
360 | ||
04e579b6 | 361 | gcc_unreachable (); |
034b6c60 | 362 | } |
363 | ||
86cde393 | 364 | /* Find an integer mode of the exact same size, or BLKmode on failure. */ |
365 | ||
366 | enum machine_mode | |
60b8c5b3 | 367 | int_mode_for_mode (enum machine_mode mode) |
86cde393 | 368 | { |
369 | switch (GET_MODE_CLASS (mode)) | |
370 | { | |
371 | case MODE_INT: | |
372 | case MODE_PARTIAL_INT: | |
373 | break; | |
374 | ||
375 | case MODE_COMPLEX_INT: | |
376 | case MODE_COMPLEX_FLOAT: | |
377 | case MODE_FLOAT: | |
069b07bf | 378 | case MODE_DECIMAL_FLOAT: |
d76983d1 | 379 | case MODE_VECTOR_INT: |
380 | case MODE_VECTOR_FLOAT: | |
06f0b99c | 381 | case MODE_FRACT: |
382 | case MODE_ACCUM: | |
383 | case MODE_UFRACT: | |
384 | case MODE_UACCUM: | |
385 | case MODE_VECTOR_FRACT: | |
386 | case MODE_VECTOR_ACCUM: | |
387 | case MODE_VECTOR_UFRACT: | |
388 | case MODE_VECTOR_UACCUM: | |
86cde393 | 389 | mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0); |
390 | break; | |
391 | ||
392 | case MODE_RANDOM: | |
393 | if (mode == BLKmode) | |
40734805 | 394 | break; |
083a2b5e | 395 | |
1be87b72 | 396 | /* ... fall through ... */ |
86cde393 | 397 | |
398 | case MODE_CC: | |
399 | default: | |
04e579b6 | 400 | gcc_unreachable (); |
86cde393 | 401 | } |
402 | ||
403 | return mode; | |
404 | } | |
405 | ||
ac875fa4 | 406 | /* Find a mode that can be used for efficient bitwise operations on MODE. |
407 | Return BLKmode if no such mode exists. */ | |
408 | ||
409 | enum machine_mode | |
410 | bitwise_mode_for_mode (enum machine_mode mode) | |
411 | { | |
412 | /* Quick exit if we already have a suitable mode. */ | |
413 | unsigned int bitsize = GET_MODE_BITSIZE (mode); | |
414 | if (SCALAR_INT_MODE_P (mode) && bitsize <= MAX_FIXED_MODE_SIZE) | |
415 | return mode; | |
416 | ||
417 | /* Reuse the sanity checks from int_mode_for_mode. */ | |
418 | gcc_checking_assert ((int_mode_for_mode (mode), true)); | |
419 | ||
420 | /* Try to replace complex modes with complex modes. In general we | |
421 | expect both components to be processed independently, so we only | |
422 | care whether there is a register for the inner mode. */ | |
423 | if (COMPLEX_MODE_P (mode)) | |
424 | { | |
425 | enum machine_mode trial = mode; | |
426 | if (GET_MODE_CLASS (mode) != MODE_COMPLEX_INT) | |
427 | trial = mode_for_size (bitsize, MODE_COMPLEX_INT, false); | |
428 | if (trial != BLKmode | |
429 | && have_regs_of_mode[GET_MODE_INNER (trial)]) | |
430 | return trial; | |
431 | } | |
432 | ||
433 | /* Try to replace vector modes with vector modes. Also try using vector | |
434 | modes if an integer mode would be too big. */ | |
435 | if (VECTOR_MODE_P (mode) || bitsize > MAX_FIXED_MODE_SIZE) | |
436 | { | |
437 | enum machine_mode trial = mode; | |
438 | if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT) | |
439 | trial = mode_for_size (bitsize, MODE_VECTOR_INT, 0); | |
440 | if (trial != BLKmode | |
441 | && have_regs_of_mode[trial] | |
442 | && targetm.vector_mode_supported_p (trial)) | |
443 | return trial; | |
444 | } | |
445 | ||
446 | /* Otherwise fall back on integers while honoring MAX_FIXED_MODE_SIZE. */ | |
447 | return mode_for_size (bitsize, MODE_INT, true); | |
448 | } | |
449 | ||
450 | /* Find a type that can be used for efficient bitwise operations on MODE. | |
451 | Return null if no such mode exists. */ | |
452 | ||
453 | tree | |
454 | bitwise_type_for_mode (enum machine_mode mode) | |
455 | { | |
456 | mode = bitwise_mode_for_mode (mode); | |
457 | if (mode == BLKmode) | |
458 | return NULL_TREE; | |
459 | ||
460 | unsigned int inner_size = GET_MODE_UNIT_BITSIZE (mode); | |
461 | tree inner_type = build_nonstandard_integer_type (inner_size, true); | |
462 | ||
463 | if (VECTOR_MODE_P (mode)) | |
464 | return build_vector_type_for_mode (inner_type, mode); | |
465 | ||
466 | if (COMPLEX_MODE_P (mode)) | |
467 | return build_complex_type (inner_type); | |
468 | ||
469 | gcc_checking_assert (GET_MODE_INNER (mode) == VOIDmode); | |
470 | return inner_type; | |
471 | } | |
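/* Illustration (a sketch only; the mode actually chosen is target
   dependent): for a 64-bit scalar float mode such as DFmode,
   bitwise_mode_for_mode falls through to the integer branch and returns
   DImode when 64 bits fit in MAX_FIXED_MODE_SIZE, so bitwise_type_for_mode
   then yields a 64-bit unsigned integer type that can be used for bitwise
   manipulation of the same bytes.  */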
472 | ||
c4740c5d | 473 | /* Find a mode that is suitable for representing a vector with |
474 | NUNITS elements of mode INNERMODE. Returns BLKmode if there | |
475 | is no suitable mode. */ | |
476 | ||
477 | enum machine_mode | |
478 | mode_for_vector (enum machine_mode innermode, unsigned nunits) | |
479 | { | |
480 | enum machine_mode mode; | |
481 | ||
482 | /* First, look for a supported vector type. */ | |
483 | if (SCALAR_FLOAT_MODE_P (innermode)) | |
484 | mode = MIN_MODE_VECTOR_FLOAT; | |
485 | else if (SCALAR_FRACT_MODE_P (innermode)) | |
486 | mode = MIN_MODE_VECTOR_FRACT; | |
487 | else if (SCALAR_UFRACT_MODE_P (innermode)) | |
488 | mode = MIN_MODE_VECTOR_UFRACT; | |
489 | else if (SCALAR_ACCUM_MODE_P (innermode)) | |
490 | mode = MIN_MODE_VECTOR_ACCUM; | |
491 | else if (SCALAR_UACCUM_MODE_P (innermode)) | |
492 | mode = MIN_MODE_VECTOR_UACCUM; | |
493 | else | |
494 | mode = MIN_MODE_VECTOR_INT; | |
495 | ||
496 | /* Do not check vector_mode_supported_p here. We'll do that | |
497 | later in vector_type_mode. */ | |
498 | for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode)) | |
499 | if (GET_MODE_NUNITS (mode) == nunits | |
500 | && GET_MODE_INNER (mode) == innermode) | |
501 | break; | |
502 | ||
503 | /* For integers, try mapping it to a same-sized scalar mode. */ | |
504 | if (mode == VOIDmode | |
505 | && GET_MODE_CLASS (innermode) == MODE_INT) | |
506 | mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode), | |
507 | MODE_INT, 0); | |
508 | ||
509 | if (mode == VOIDmode | |
510 | || (GET_MODE_CLASS (mode) == MODE_INT | |
511 | && !have_regs_of_mode[mode])) | |
512 | return BLKmode; | |
513 | ||
514 | return mode; | |
515 | } | |
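/* Worked example (hypothetical target): mode_for_vector (SImode, 4) first
   scans the MODE_VECTOR_INT chain for a mode with 4 units of SImode and
   returns V4SImode if the target defines it.  If no such vector mode exists,
   the integer fallback asks for a 128-bit scalar integer mode, and BLKmode
   is returned when even that is unavailable or has no registers.  */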
516 | ||
1a3e3a66 | 517 | /* Return the alignment of MODE. This will be bounded by 1 and |
518 | BIGGEST_ALIGNMENT. */ | |
519 | ||
520 | unsigned int | |
60b8c5b3 | 521 | get_mode_alignment (enum machine_mode mode) |
1a3e3a66 | 522 | { |
47a2c1d4 | 523 | return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT)); |
1a3e3a66 | 524 | } |
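/* Example (assuming mode_base_align[DFmode] == 8 on the target): the result
   is MIN (BIGGEST_ALIGNMENT, 8 * BITS_PER_UNIT), i.e. 64-bit alignment
   unless the target caps BIGGEST_ALIGNMENT below that.  */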
525 | ||
432dd330 | 526 | /* Return the precision of the mode, or for a complex or vector mode the |
527 | precision of the mode of its elements. */ | |
528 | ||
529 | unsigned int | |
530 | element_precision (enum machine_mode mode) | |
531 | { | |
532 | if (COMPLEX_MODE_P (mode) || VECTOR_MODE_P (mode)) | |
533 | mode = GET_MODE_INNER (mode); | |
534 | ||
535 | return GET_MODE_PRECISION (mode); | |
536 | } | |
537 | ||
13d3ceb9 | 538 | /* Return the natural mode of an array, given that it is SIZE bytes in |
539 | total and has elements of type ELEM_TYPE. */ | |
540 | ||
541 | static enum machine_mode | |
542 | mode_for_array (tree elem_type, tree size) | |
543 | { | |
544 | tree elem_size; | |
545 | unsigned HOST_WIDE_INT int_size, int_elem_size; | |
546 | bool limit_p; | |
547 | ||
548 | /* One-element arrays get the component type's mode. */ | |
549 | elem_size = TYPE_SIZE (elem_type); | |
550 | if (simple_cst_equal (size, elem_size)) | |
551 | return TYPE_MODE (elem_type); | |
552 | ||
553 | limit_p = true; | |
e913b5cd | 554 | if (tree_fits_uhwi_p (size) && tree_fits_uhwi_p (elem_size)) |
13d3ceb9 | 555 | { |
e913b5cd | 556 | int_size = tree_to_uhwi (size); |
557 | int_elem_size = tree_to_uhwi (elem_size); | |
13d3ceb9 | 558 | if (int_elem_size > 0 |
559 | && int_size % int_elem_size == 0 | |
560 | && targetm.array_mode_supported_p (TYPE_MODE (elem_type), | |
561 | int_size / int_elem_size)) | |
562 | limit_p = false; | |
563 | } | |
564 | return mode_for_size_tree (size, MODE_INT, limit_p); | |
565 | } | |
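/* Illustration: for "char a[2]" the total size (16 bits) differs from the
   element size, so the array falls through to
   mode_for_size_tree (16, MODE_INT, limit_p), typically giving HImode and
   letting the whole array be moved in a single register.  */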
f2cfea4a | 566 | \f |
f5712181 | 567 | /* Subroutine of layout_decl: Force alignment required for the data type. |
568 | But if the decl itself wants greater alignment, don't override that. */ | |
569 | ||
570 | static inline void | |
571 | do_type_align (tree type, tree decl) | |
572 | { | |
573 | if (TYPE_ALIGN (type) > DECL_ALIGN (decl)) | |
574 | { | |
575 | DECL_ALIGN (decl) = TYPE_ALIGN (type); | |
79bdd5ff | 576 | if (TREE_CODE (decl) == FIELD_DECL) |
577 | DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type); | |
f5712181 | 578 | } |
579 | } | |
580 | ||
f2cfea4a | 581 | /* Set the size, mode and alignment of a ..._DECL node. |
582 | TYPE_DECL does need this for C++. | |
583 | Note that LABEL_DECL and CONST_DECL nodes do not need this, | |
584 | and FUNCTION_DECL nodes have them set up in a special (and simple) way. | |
585 | Don't call layout_decl for them. | |
586 | ||
587 | KNOWN_ALIGN is the amount of alignment we can assume this | |
588 | decl has with no special effort. It is relevant only for FIELD_DECLs | |
589 | and depends on the previous fields. | |
590 | All that matters about KNOWN_ALIGN is which powers of 2 divide it. | |
591 | If KNOWN_ALIGN is 0, it means, "as much alignment as you like": | |
592 | the record will be aligned to suit. */ | |
593 | ||
594 | void | |
60b8c5b3 | 595 | layout_decl (tree decl, unsigned int known_align) |
f2cfea4a | 596 | { |
19cb6b50 | 597 | tree type = TREE_TYPE (decl); |
598 | enum tree_code code = TREE_CODE (decl); | |
65433eb4 | 599 | rtx rtl = NULL_RTX; |
389dd41b | 600 | location_t loc = DECL_SOURCE_LOCATION (decl); |
f2cfea4a | 601 | |
602 | if (code == CONST_DECL) | |
603 | return; | |
7bd4091f | 604 | |
04e579b6 | 605 | gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL |
606 | || code == TYPE_DECL || code == FIELD_DECL); |
7bd4091f | 607 | |
65433eb4 | 608 | rtl = DECL_RTL_IF_SET (decl); |
609 | ||
f2cfea4a | 610 | if (type == error_mark_node) |
51e5c07e | 611 | type = void_type_node; |
f2cfea4a | 612 | |
02e7a332 | 613 | /* Usually the size and mode come from the data type without change, |
614 | however, the front-end may set the explicit width of the field, so its | |
615 | size may not be the same as the size of its type. This happens with | |
616 | bitfields, of course (an `int' bitfield may be only 2 bits, say), but it | |
617 | also happens with other fields. For example, the C++ front-end creates | |
618 | zero-sized fields corresponding to empty base classes, and depends on | |
619 | layout_type setting DECL_FIELD_BITPOS correctly for the field. Set the | |
62d2dc6f | 620 | size in bytes from the size in bits. If we have already set the mode, |
621 | don't set it again since we can be called twice for FIELD_DECLs. */ | |
02e7a332 | 622 | |
86ae60fd | 623 | DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type); |
62d2dc6f | 624 | if (DECL_MODE (decl) == VOIDmode) |
625 | DECL_MODE (decl) = TYPE_MODE (type); | |
02e7a332 | 626 | |
034b6c60 | 627 | if (DECL_SIZE (decl) == 0) |
b278476e | 628 | { |
bc97b18f | 629 | DECL_SIZE (decl) = TYPE_SIZE (type); |
630 | DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type); | |
b278476e | 631 | } |
ac068b11 | 632 | else if (DECL_SIZE_UNIT (decl) == 0) |
02e7a332 | 633 | DECL_SIZE_UNIT (decl) |
389dd41b | 634 | = fold_convert_loc (loc, sizetype, |
635 | size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl), | |
636 | bitsize_unit_node)); | |
b278476e | 637 | |
f5712181 | 638 | if (code != FIELD_DECL) |
639 | /* For non-fields, update the alignment from the type. */ | |
640 | do_type_align (type, decl); | |
641 | else | |
642 | /* For fields, it's a bit more complicated... */ | |
40734805 | 643 | { |
6a22ca24 | 644 | bool old_user_align = DECL_USER_ALIGN (decl); |
7c68c953 | 645 | bool zero_bitfield = false; |
646 | bool packed_p = DECL_PACKED (decl); | |
647 | unsigned int mfa; | |
6a22ca24 | 648 | |
f5712181 | 649 | if (DECL_BIT_FIELD (decl)) |
650 | { | |
651 | DECL_BIT_FIELD_TYPE (decl) = type; | |
f2cfea4a | 652 | |
f5712181 | 653 | /* A zero-length bit-field affects the alignment of the next |
7c68c953 | 654 | field. In essence such bit-fields are not influenced by |
655 | any packing due to #pragma pack or attribute packed. */ | |
f5712181 | 656 | if (integer_zerop (DECL_SIZE (decl)) |
883b2e73 | 657 | && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl))) |
f5712181 | 658 | { |
7c68c953 | 659 | zero_bitfield = true; |
660 | packed_p = false; | |
f5712181 | 661 | #ifdef PCC_BITFIELD_TYPE_MATTERS |
662 | if (PCC_BITFIELD_TYPE_MATTERS) | |
663 | do_type_align (type, decl); | |
664 | else | |
665 | #endif | |
3aa7cd03 | 666 | { |
f5712181 | 667 | #ifdef EMPTY_FIELD_BOUNDARY |
3aa7cd03 | 668 | if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl)) |
669 | { | |
670 | DECL_ALIGN (decl) = EMPTY_FIELD_BOUNDARY; | |
671 | DECL_USER_ALIGN (decl) = 0; | |
672 | } | |
f5712181 | 673 | #endif |
3aa7cd03 | 674 | } |
f5712181 | 675 | } |
676 | ||
677 | /* See if we can use an ordinary integer mode for a bit-field. | |
f30fa59a | 678 | Conditions are: a fixed size that is correct for another mode, |
7691c4ce | 679 | occupying a complete byte or bytes on a proper boundary. */ |
f5712181 | 680 | if (TYPE_SIZE (type) != 0 |
681 | && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST | |
7691c4ce | 682 | && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT) |
f5712181 | 683 | { |
684 | enum machine_mode xmode | |
685 | = mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1); | |
23f65835 | 686 | unsigned int xalign = GET_MODE_ALIGNMENT (xmode); |
f5712181 | 687 | |
9e7454d0 | 688 | if (xmode != BLKmode |
23f65835 | 689 | && !(xalign > BITS_PER_UNIT && DECL_PACKED (decl)) |
690 | && (known_align == 0 || known_align >= xalign)) | |
f5712181 | 691 | { |
23f65835 | 692 | DECL_ALIGN (decl) = MAX (xalign, DECL_ALIGN (decl)); |
f5712181 | 693 | DECL_MODE (decl) = xmode; |
694 | DECL_BIT_FIELD (decl) = 0; | |
695 | } | |
696 | } | |
697 | ||
698 | /* Turn off DECL_BIT_FIELD if we won't need it set. */ | |
699 | if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode | |
700 | && known_align >= TYPE_ALIGN (type) | |
701 | && DECL_ALIGN (decl) >= TYPE_ALIGN (type)) | |
702 | DECL_BIT_FIELD (decl) = 0; | |
703 | } | |
7c68c953 | 704 | else if (packed_p && DECL_USER_ALIGN (decl)) |
f5712181 | 705 | /* Don't touch DECL_ALIGN. For other packed fields, go ahead and |
a708df98 | 706 | round up; we'll reduce it again below. We want packing to |
c7bf1374 | 707 | supersede USER_ALIGN inherited from the type, but defer to |
a708df98 | 708 | alignment explicitly specified on the field decl. */; |
f5712181 | 709 | else |
6a22ca24 | 710 | do_type_align (type, decl); |
711 | ||
7b04d839 | 712 | /* If the field is packed and not explicitly aligned, give it the |
713 | minimum alignment. Note that do_type_align may set | |
714 | DECL_USER_ALIGN, so we need to check old_user_align instead. */ | |
7c68c953 | 715 | if (packed_p |
7b04d839 | 716 | && !old_user_align) |
6a22ca24 | 717 | DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT); |
f5712181 | 718 | |
7c68c953 | 719 | if (! packed_p && ! DECL_USER_ALIGN (decl)) |
f2cfea4a | 720 | { |
f5712181 | 721 | /* Some targets (e.g. i386, VMS) limit struct field alignment |
722 | to a lower boundary than alignment of variables unless | |
723 | it was overridden by attribute aligned. */ | |
724 | #ifdef BIGGEST_FIELD_ALIGNMENT | |
725 | DECL_ALIGN (decl) | |
726 | = MIN (DECL_ALIGN (decl), (unsigned) BIGGEST_FIELD_ALIGNMENT); | |
727 | #endif | |
728 | #ifdef ADJUST_FIELD_ALIGN | |
729 | DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl)); | |
730 | #endif | |
f2cfea4a | 731 | } |
36530340 | 732 | |
7c68c953 | 733 | if (zero_bitfield) |
734 | mfa = initial_max_fld_align * BITS_PER_UNIT; | |
735 | else | |
736 | mfa = maximum_field_alignment; | |
36530340 | 737 | /* Should this be controlled by DECL_USER_ALIGN, too? */ |
7c68c953 | 738 | if (mfa != 0) |
739 | DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), mfa); | |
f2cfea4a | 740 | } |
741 | ||
742 | /* Evaluate nonconstant size only once, either now or as soon as safe. */ | |
743 | if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST) | |
744 | DECL_SIZE (decl) = variable_size (DECL_SIZE (decl)); | |
b278476e | 745 | if (DECL_SIZE_UNIT (decl) != 0 |
746 | && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST) | |
747 | DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl)); | |
748 | ||
749 | /* If requested, warn about definitions of large data objects. */ | |
750 | if (warn_larger_than | |
b8e0d419 | 751 | && (code == VAR_DECL || code == PARM_DECL) |
b278476e | 752 | && ! DECL_EXTERNAL (decl)) |
753 | { | |
754 | tree size = DECL_SIZE_UNIT (decl); | |
755 | ||
756 | if (size != 0 && TREE_CODE (size) == INTEGER_CST | |
a0c2c45b | 757 | && compare_tree_int (size, larger_than_size) > 0) |
b278476e | 758 | { |
f9ae6f95 | 759 | int size_as_int = TREE_INT_CST_LOW (size); |
b278476e | 760 | |
a0c2c45b | 761 | if (compare_tree_int (size, size_as_int) == 0) |
67089c6b | 762 | warning (OPT_Wlarger_than_, "size of %q+D is %d bytes", decl, size_as_int); |
b278476e | 763 | else |
67089c6b | 764 | warning (OPT_Wlarger_than_, "size of %q+D is larger than %wd bytes", |
3cf8b391 | 765 | decl, larger_than_size); |
b278476e | 766 | } |
767 | } | |
65433eb4 | 768 | |
769 | /* If the RTL was already set, update its mode and mem attributes. */ | |
770 | if (rtl) | |
771 | { | |
772 | PUT_MODE (rtl, DECL_MODE (decl)); | |
773 | SET_DECL_RTL (decl, 0); | |
774 | set_mem_attributes (rtl, decl, 1); | |
775 | SET_DECL_RTL (decl, rtl); | |
776 | } | |
f2cfea4a | 777 | } |
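/* Example of the bit-field promotion above (a sketch; details depend on the
   target): a field declared "unsigned f : 8" that happens to start on a byte
   boundary satisfies the mode_for_size_tree check, so it is given QImode,
   its alignment is raised to that of QImode, and DECL_BIT_FIELD is cleared;
   from then on it is handled like an ordinary byte-sized field.  */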
dddcebdc | 778 | |
779 | /* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of | |
780 | a previous call to layout_decl and calls it again. */ | |
781 | ||
782 | void | |
783 | relayout_decl (tree decl) | |
784 | { | |
785 | DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0; | |
786 | DECL_MODE (decl) = VOIDmode; | |
950474d6 | 787 | if (!DECL_USER_ALIGN (decl)) |
788 | DECL_ALIGN (decl) = 0; | |
dddcebdc | 789 | SET_DECL_RTL (decl, 0); |
790 | ||
791 | layout_decl (decl, 0); | |
792 | } | |
f2cfea4a | 793 | \f |
02e7a332 | 794 | /* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or |
795 | QUAL_UNION_TYPE. Return a pointer to a struct record_layout_info which | |
796 | is to be passed to all other layout functions for this record. It is the | |
40734805 | 797 | responsibility of the caller to call `free' for the storage returned. |
02e7a332 | 798 | Note that garbage collection is not permitted until we finish laying |
799 | out the record. */ | |
f2cfea4a | 800 | |
99f4e085 | 801 | record_layout_info |
60b8c5b3 | 802 | start_record_layout (tree t) |
f2cfea4a | 803 | { |
f7f3687c | 804 | record_layout_info rli = XNEW (struct record_layout_info_s); |
99f4e085 | 805 | |
806 | rli->t = t; | |
02e7a332 | 807 | |
99f4e085 | 808 | /* If the type has a minimum specified alignment (via an attribute |
809 | declaration, for example) use it -- otherwise, start with a | |
810 | one-byte alignment. */ | |
811 | rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t)); | |
f5712181 | 812 | rli->unpacked_align = rli->record_align; |
02e7a332 | 813 | rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT); |
f2cfea4a | 814 | |
3af966eb | 815 | #ifdef STRUCTURE_SIZE_BOUNDARY |
816 | /* Packed structures don't need to have minimum size. */ | |
0f9793f3 | 817 | if (! TYPE_PACKED (t)) |
546e12a7 | 818 | { |
819 | unsigned tmp; | |
820 | ||
821 | /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY. */ | |
822 | tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY; | |
823 | if (maximum_field_alignment != 0) | |
824 | tmp = MIN (tmp, maximum_field_alignment); | |
825 | rli->record_align = MAX (rli->record_align, tmp); | |
826 | } | |
3af966eb | 827 | #endif |
f2cfea4a | 828 | |
02e7a332 | 829 | rli->offset = size_zero_node; |
830 | rli->bitpos = bitsize_zero_node; | |
f04f096b | 831 | rli->prev_field = 0; |
f1f41a6c | 832 | rli->pending_statics = 0; |
02e7a332 | 833 | rli->packed_maybe_necessary = 0; |
7bd4091f | 834 | rli->remaining_in_alignment = 0; |
02e7a332 | 835 | |
99f4e085 | 836 | return rli; |
837 | } | |
f2cfea4a | 838 | |
2765f7eb | 839 | /* Return the combined bit position for the byte offset OFFSET and the |
d9906773 | 840 | bit position BITPOS. |
841 | ||
842 | These functions operate on byte and bit positions present in FIELD_DECLs | |
843 | and assume that these expressions result in no (intermediate) overflow. | |
844 | This assumption is necessary to fold the expressions as much as possible, | |
845 | so as to avoid creating artificially variable-sized types in languages | |
846 | supporting variable-sized types like Ada. */ | |
6d731e4d | 847 | |
848 | tree | |
60b8c5b3 | 849 | bit_from_pos (tree offset, tree bitpos) |
6d731e4d | 850 | { |
d9906773 | 851 | if (TREE_CODE (offset) == PLUS_EXPR) |
852 | offset = size_binop (PLUS_EXPR, | |
853 | fold_convert (bitsizetype, TREE_OPERAND (offset, 0)), | |
854 | fold_convert (bitsizetype, TREE_OPERAND (offset, 1))); | |
855 | else | |
856 | offset = fold_convert (bitsizetype, offset); | |
6d731e4d | 857 | return size_binop (PLUS_EXPR, bitpos, |
d9906773 | 858 | size_binop (MULT_EXPR, offset, bitsize_unit_node)); |
6d731e4d | 859 | } |
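/* Worked example (assuming BITS_PER_UNIT == 8): a field at byte offset 2
   with bit offset 5 has combined bit position 5 + 2 * 8 == 21, which is the
   value bit_from_pos returns as a bitsizetype constant when both inputs are
   constants.  */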
860 | ||
2765f7eb | 861 | /* Return the combined truncated byte position for the byte offset OFFSET and |
d9906773 | 862 | the bit position BITPOS. */ |
2765f7eb | 863 | |
6d731e4d | 864 | tree |
60b8c5b3 | 865 | byte_from_pos (tree offset, tree bitpos) |
6d731e4d | 866 | { |
2765f7eb | 867 | tree bytepos; |
868 | if (TREE_CODE (bitpos) == MULT_EXPR | |
869 | && tree_int_cst_equal (TREE_OPERAND (bitpos, 1), bitsize_unit_node)) | |
870 | bytepos = TREE_OPERAND (bitpos, 0); | |
871 | else | |
872 | bytepos = size_binop (TRUNC_DIV_EXPR, bitpos, bitsize_unit_node); | |
873 | return size_binop (PLUS_EXPR, offset, fold_convert (sizetype, bytepos)); | |
6d731e4d | 874 | } |
875 | ||
2765f7eb | 876 | /* Split the bit position POS into a byte offset *POFFSET and a bit |
877 | position *PBITPOS with the byte offset aligned to OFF_ALIGN bits. */ | |
878 | ||
6d731e4d | 879 | void |
60b8c5b3 | 880 | pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align, |
881 | tree pos) | |
6d731e4d | 882 | { |
2765f7eb | 883 | tree toff_align = bitsize_int (off_align); |
884 | if (TREE_CODE (pos) == MULT_EXPR | |
885 | && tree_int_cst_equal (TREE_OPERAND (pos, 1), toff_align)) | |
886 | { | |
887 | *poffset = size_binop (MULT_EXPR, | |
888 | fold_convert (sizetype, TREE_OPERAND (pos, 0)), | |
889 | size_int (off_align / BITS_PER_UNIT)); | |
890 | *pbitpos = bitsize_zero_node; | |
891 | } | |
892 | else | |
893 | { | |
894 | *poffset = size_binop (MULT_EXPR, | |
895 | fold_convert (sizetype, | |
896 | size_binop (FLOOR_DIV_EXPR, pos, | |
897 | toff_align)), | |
898 | size_int (off_align / BITS_PER_UNIT)); | |
899 | *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, toff_align); | |
900 | } | |
6d731e4d | 901 | } |
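/* Worked example (assuming BITS_PER_UNIT == 8):
   pos_from_bit (&off, &bit, 8, bitsize_int (21)) splits bit position 21 at
   an 8-bit offset alignment into a byte offset of 2 and a residual bit
   position of 5, i.e. 21 == 2 * 8 + 5.  */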
902 | ||
903 | /* Given a pointer to bit and byte offsets and an offset alignment, | |
904 | normalize the offsets so they are within the alignment. */ | |
905 | ||
906 | void | |
60b8c5b3 | 907 | normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align) |
6d731e4d | 908 | { |
909 | /* If the bit position is now larger than it should be, adjust it | |
910 | downwards. */ | |
911 | if (compare_tree_int (*pbitpos, off_align) >= 0) | |
912 | { | |
2765f7eb | 913 | tree offset, bitpos; |
914 | pos_from_bit (&offset, &bitpos, off_align, *pbitpos); | |
915 | *poffset = size_binop (PLUS_EXPR, *poffset, offset); | |
916 | *pbitpos = bitpos; | |
6d731e4d | 917 | } |
918 | } | |
919 | ||
02e7a332 | 920 | /* Print debugging information about the information in RLI. */ |
83675f44 | 921 | |
4b987fac | 922 | DEBUG_FUNCTION void |
60b8c5b3 | 923 | debug_rli (record_layout_info rli) |
83675f44 | 924 | { |
02e7a332 | 925 | print_node_brief (stderr, "type", rli->t, 0); |
926 | print_node_brief (stderr, "\noffset", rli->offset, 0); | |
927 | print_node_brief (stderr, " bitpos", rli->bitpos, 0); | |
83675f44 | 928 | |
f5712181 | 929 | fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n", |
930 | rli->record_align, rli->unpacked_align, | |
38ac5984 | 931 | rli->offset_align); |
7bd4091f | 932 | |
933 | /* The ms_struct code is the only one that uses this. */ |
934 | if (targetm.ms_bitfield_layout_p (rli->t)) | |
674b377b | 935 | fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment); |
7bd4091f | 936 | |
02e7a332 | 937 | if (rli->packed_maybe_necessary) |
938 | fprintf (stderr, "packed may be necessary\n"); | |
939 | ||
f1f41a6c | 940 | if (!vec_safe_is_empty (rli->pending_statics)) |
02e7a332 | 941 | { |
942 | fprintf (stderr, "pending statics:\n"); | |
364ba361 | 943 | debug_vec_tree (rli->pending_statics); |
02e7a332 | 944 | } |
945 | } | |
946 | ||
947 | /* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and | |
948 | BITPOS if necessary to keep BITPOS below OFFSET_ALIGN. */ | |
949 | ||
950 | void | |
60b8c5b3 | 951 | normalize_rli (record_layout_info rli) |
02e7a332 | 952 | { |
6d731e4d | 953 | normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align); |
02e7a332 | 954 | } |
83675f44 | 955 | |
02e7a332 | 956 | /* Returns the size in bytes allocated so far. */ |
957 | ||
958 | tree | |
60b8c5b3 | 959 | rli_size_unit_so_far (record_layout_info rli) |
02e7a332 | 960 | { |
6d731e4d | 961 | return byte_from_pos (rli->offset, rli->bitpos); |
02e7a332 | 962 | } |
963 | ||
964 | /* Returns the size in bits allocated so far. */ | |
965 | ||
966 | tree | |
60b8c5b3 | 967 | rli_size_so_far (record_layout_info rli) |
02e7a332 | 968 | { |
6d731e4d | 969 | return bit_from_pos (rli->offset, rli->bitpos); |
02e7a332 | 970 | } |
971 | ||
4b387c35 | 972 | /* FIELD is about to be added to RLI->T. The alignment (in bits) of |
23325b33 | 973 | the next available location within the record is given by KNOWN_ALIGN. |
974 | Update the variable alignment fields in RLI, and return the alignment | |
975 | to give the FIELD. */ | |
02e7a332 | 976 | |
4ee9c684 | 977 | unsigned int |
60b8c5b3 | 978 | update_alignment_for_field (record_layout_info rli, tree field, |
979 | unsigned int known_align) | |
99f4e085 | 980 | { |
981 | /* The alignment required for FIELD. */ | |
982 | unsigned int desired_align; | |
99f4e085 | 983 | /* The type of this field. */ |
984 | tree type = TREE_TYPE (field); | |
4b387c35 | 985 | /* True if the field was explicitly aligned by the user. */ |
986 | bool user_align; | |
f5712181 | 987 | bool is_bitfield; |
99f4e085 | 988 | |
f6cf83a8 | 989 | /* Do not attempt to align an ERROR_MARK node */ |
990 | if (TREE_CODE (type) == ERROR_MARK) | |
991 | return 0; | |
992 | ||
f5712181 | 993 | /* Lay out the field so we know what alignment it needs. */ |
994 | layout_decl (field, known_align); | |
02e7a332 | 995 | desired_align = DECL_ALIGN (field); |
aca14577 | 996 | user_align = DECL_USER_ALIGN (field); |
02e7a332 | 997 | |
f5712181 | 998 | is_bitfield = (type != error_mark_node |
999 | && DECL_BIT_FIELD_TYPE (field) | |
1000 | && ! integer_zerop (TYPE_SIZE (type))); | |
f2cfea4a | 1001 | |
99f4e085 | 1002 | /* Record must have at least as much alignment as any field. |
1003 | Otherwise, the alignment of the field within the record is | |
1004 | meaningless. */ | |
7bd4091f | 1005 | if (targetm.ms_bitfield_layout_p (rli->t)) |
f04f096b | 1006 | { |
8642f3d3 | 1007 | /* Here, the alignment of the underlying type of a bitfield can |
1008 | affect the alignment of a record; even a zero-sized field | |
1009 | can do this. The alignment should be to the alignment of | |
1010 | the type, except that for zero-size bitfields this only | |
f712a0dc | 1011 | applies if there was an immediately prior, nonzero-size |
8642f3d3 | 1012 | bitfield. (That's the way it is, experimentally.) */ |
089ea875 | 1013 | if ((!is_bitfield && !DECL_PACKED (field)) |
a6949f31 | 1014 | || ((DECL_SIZE (field) == NULL_TREE |
1015 | || !integer_zerop (DECL_SIZE (field))) | |
7bd4091f | 1016 | ? !DECL_PACKED (field) |
1017 | : (rli->prev_field | |
1018 | && DECL_BIT_FIELD_TYPE (rli->prev_field) | |
1019 | && ! integer_zerop (DECL_SIZE (rli->prev_field))))) | |
f04f096b | 1020 | { |
8642f3d3 | 1021 | unsigned int type_align = TYPE_ALIGN (type); |
1022 | type_align = MAX (type_align, desired_align); | |
1023 | if (maximum_field_alignment != 0) | |
1024 | type_align = MIN (type_align, maximum_field_alignment); | |
1025 | rli->record_align = MAX (rli->record_align, type_align); | |
f04f096b | 1026 | rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type)); |
1027 | } | |
40734805 | 1028 | } |
fca12917 | 1029 | #ifdef PCC_BITFIELD_TYPE_MATTERS |
f5712181 | 1030 | else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS) |
99f4e085 | 1031 | { |
4975da72 | 1032 | /* Named bit-fields cause the entire structure to have the |
25ba5be6 | 1033 | alignment implied by their type. Some targets also apply the same |
1034 | rules to unnamed bitfields. */ | |
1035 | if (DECL_NAME (field) != 0 | |
1036 | || targetm.align_anon_bitfield ()) | |
f2cfea4a | 1037 | { |
99f4e085 | 1038 | unsigned int type_align = TYPE_ALIGN (type); |
fe352cf1 | 1039 | |
77d0f168 | 1040 | #ifdef ADJUST_FIELD_ALIGN |
1041 | if (! TYPE_USER_ALIGN (type)) | |
1042 | type_align = ADJUST_FIELD_ALIGN (field, type_align); | |
1043 | #endif | |
1044 | ||
7c68c953 | 1045 | /* Targets might choose to handle unnamed and hence possibly |
1046 | zero-width bitfields. Those are not influenced by #pragmas |
1047 | or packed attributes. */ | |
1048 | if (integer_zerop (DECL_SIZE (field))) | |
1049 | { | |
1050 | if (initial_max_fld_align) | |
1051 | type_align = MIN (type_align, | |
1052 | initial_max_fld_align * BITS_PER_UNIT); | |
1053 | } | |
1054 | else if (maximum_field_alignment != 0) | |
99f4e085 | 1055 | type_align = MIN (type_align, maximum_field_alignment); |
1056 | else if (DECL_PACKED (field)) | |
1057 | type_align = MIN (type_align, BITS_PER_UNIT); | |
87994a83 | 1058 | |
4975da72 | 1059 | /* The alignment of the record is increased to the maximum |
1060 | of the current alignment, the alignment indicated on the | |
1061 | field (i.e., the alignment specified by an __aligned__ | |
1062 | attribute), and the alignment indicated by the type of | |
1063 | the field. */ | |
1064 | rli->record_align = MAX (rli->record_align, desired_align); | |
99f4e085 | 1065 | rli->record_align = MAX (rli->record_align, type_align); |
4975da72 | 1066 | |
fca12917 | 1067 | if (warn_packed) |
38ac5984 | 1068 | rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type)); |
fced8f42 | 1069 | user_align |= TYPE_USER_ALIGN (type); |
fca12917 | 1070 | } |
99f4e085 | 1071 | } |
99f4e085 | 1072 | #endif |
f5712181 | 1073 | else |
99f4e085 | 1074 | { |
1075 | rli->record_align = MAX (rli->record_align, desired_align); | |
02e7a332 | 1076 | rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type)); |
99f4e085 | 1077 | } |
fca12917 | 1078 | |
4b387c35 | 1079 | TYPE_USER_ALIGN (rli->t) |= user_align; |
1080 | ||
1081 | return desired_align; | |
1082 | } | |
1083 | ||
1084 | /* Called from place_field to handle unions. */ | |
1085 | ||
1086 | static void | |
60b8c5b3 | 1087 | place_union_field (record_layout_info rli, tree field) |
4b387c35 | 1088 | { |
1089 | update_alignment_for_field (rli, field, /*known_align=*/0); | |
1090 | ||
1091 | DECL_FIELD_OFFSET (field) = size_zero_node; | |
1092 | DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node; | |
1093 | SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT); | |
1094 | ||
7bd4091f | 1095 | /* If this is an ERROR_MARK return *after* having set the |
f6cf83a8 | 1096 | field at the start of the union. This helps when parsing |
1097 | invalid fields. */ | |
1098 | if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK) | |
1099 | return; | |
1100 | ||
4b387c35 | 1101 | /* We assume the union's size will be a multiple of a byte so we don't |
1102 | bother with BITPOS. */ | |
1103 | if (TREE_CODE (rli->t) == UNION_TYPE) | |
1104 | rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field)); | |
1105 | else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE) | |
178825bb | 1106 | rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field), |
faa43f85 | 1107 | DECL_SIZE_UNIT (field), rli->offset); |
4b387c35 | 1108 | } |
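/* Expository note: for a plain UNION_TYPE every member is placed at offset
   zero and rli->offset simply tracks the maximum member size, so the union
   ends up as large as its largest field; QUAL_UNION_TYPE (used for Ada
   variant parts) instead builds a COND_EXPR selecting the size of the
   alternative whose qualifier holds.  */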
1109 | ||
9a27f26f | 1110 | #if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED) |
805e22b2 | 1111 | /* A bitfield of SIZE with a required access alignment of ALIGN is allocated |
a8b24921 | 1112 | at BYTE_OFFSET / BIT_OFFSET. Return nonzero if the field would span more |
805e22b2 | 1113 | units of alignment than the underlying TYPE. */ |
1114 | static int | |
60b8c5b3 | 1115 | excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset, |
1116 | HOST_WIDE_INT size, HOST_WIDE_INT align, tree type) | |
805e22b2 | 1117 | { |
1118 | /* Note that the calculation of OFFSET might overflow; we calculate it so | |
1119 | that we still get the right result as long as ALIGN is a power of two. */ | |
1120 | unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset; | |
1121 | ||
1122 | offset = offset % align; | |
1123 | return ((offset + size + align - 1) / align | |
aa59f000 | 1124 | > tree_to_uhwi (TYPE_SIZE (type)) / align); |
805e22b2 | 1125 | } |
9a27f26f | 1126 | #endif |
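/* Worked example (BITS_PER_UNIT == 8): for a char-typed bit-field of SIZE 6
   starting at BIT_OFFSET 5 within its byte, with ALIGN 8 and TYPE_SIZE 8,
   the span is (5 + 6 + 7) / 8 == 2 alignment units while the type itself
   covers only 8 / 8 == 1, so excess_unit_span returns nonzero and
   place_field advances the field to the next boundary.  */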
805e22b2 | 1127 | |
4b387c35 | 1128 | /* RLI contains information about the layout of a RECORD_TYPE. FIELD |
1129 | is a FIELD_DECL to be added after those fields already present in | |
1130 | T. (FIELD is not actually added to the TYPE_FIELDS list here; | |
1131 | callers that desire that behavior must manually perform that step.) */ | |
1132 | ||
1133 | void | |
60b8c5b3 | 1134 | place_field (record_layout_info rli, tree field) |
4b387c35 | 1135 | { |
1136 | /* The alignment required for FIELD. */ | |
1137 | unsigned int desired_align; | |
1138 | /* The alignment FIELD would have if we just dropped it into the | |
1139 | record as it presently stands. */ | |
1140 | unsigned int known_align; | |
1141 | unsigned int actual_align; | |
1142 | /* The type of this field. */ | |
1143 | tree type = TREE_TYPE (field); | |
1144 | ||
65a7c526 | 1145 | gcc_assert (TREE_CODE (field) != ERROR_MARK); |
4b387c35 | 1146 | |
1147 | /* If FIELD is static, then treat it like a separate variable, not | |
1148 | really like a structure field. If it is a FUNCTION_DECL, it's a | |
1149 | method. In both cases, all we do is lay out the decl, and we do | |
1150 | it *after* the record is laid out. */ | |
1151 | if (TREE_CODE (field) == VAR_DECL) | |
1152 | { | |
f1f41a6c | 1153 | vec_safe_push (rli->pending_statics, field); |
4b387c35 | 1154 | return; |
1155 | } | |
1156 | ||
1157 | /* Enumerators and enum types which are local to this class need not | |
1158 | be laid out. Likewise for initialized constant fields. */ | |
1159 | else if (TREE_CODE (field) != FIELD_DECL) | |
1160 | return; | |
1161 | ||
1162 | /* Unions are laid out very differently than records, so split | |
1163 | that code off to another function. */ | |
1164 | else if (TREE_CODE (rli->t) != RECORD_TYPE) | |
1165 | { | |
1166 | place_union_field (rli, field); | |
1167 | return; | |
1168 | } | |
1169 | ||
7bd4091f | 1170 | else if (TREE_CODE (type) == ERROR_MARK) |
f6cf83a8 | 1171 | { |
1172 | /* Place this field at the current allocation position, so we | |
1173 | maintain monotonicity. */ | |
1174 | DECL_FIELD_OFFSET (field) = rli->offset; | |
1175 | DECL_FIELD_BIT_OFFSET (field) = rli->bitpos; | |
1176 | SET_DECL_OFFSET_ALIGN (field, rli->offset_align); | |
1177 | return; | |
1178 | } | |
1179 | ||
4b387c35 | 1180 | /* Work out the known alignment so far. Note that A & (-A) is the |
1181 | value of the least-significant bit in A that is one (e.g. 24 & -24 == 8). */ |
1182 | if (! integer_zerop (rli->bitpos)) | |
e913b5cd | 1183 | known_align = (tree_to_uhwi (rli->bitpos) |
1184 | & - tree_to_uhwi (rli->bitpos)); | |
4b387c35 | 1185 | else if (integer_zerop (rli->offset)) |
23325b33 | 1186 | known_align = 0; |
e913b5cd | 1187 | else if (tree_fits_uhwi_p (rli->offset)) |
4b387c35 | 1188 | known_align = (BITS_PER_UNIT |
e913b5cd | 1189 | * (tree_to_uhwi (rli->offset) |
1190 | & - tree_to_uhwi (rli->offset))); | |
4b387c35 | 1191 | else |
1192 | known_align = rli->offset_align; | |
60b8c5b3 | 1193 | |
4b387c35 | 1194 | desired_align = update_alignment_for_field (rli, field, known_align); |
23325b33 | 1195 | if (known_align == 0) |
1196 | known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align); | |
4b387c35 | 1197 | |
99f4e085 | 1198 | if (warn_packed && DECL_PACKED (field)) |
1199 | { | |
f5712181 | 1200 | if (known_align >= TYPE_ALIGN (type)) |
fca12917 | 1201 | { |
99f4e085 | 1202 | if (TYPE_ALIGN (type) > desired_align) |
fca12917 | 1203 | { |
99f4e085 | 1204 | if (STRICT_ALIGNMENT) |
3cf8b391 | 1205 | warning (OPT_Wattributes, "packed attribute causes " |
1206 | "inefficient alignment for %q+D", field); | |
acca8c42 | 1207 | /* Don't warn if DECL_PACKED was set by the type. */ |
1208 | else if (!TYPE_PACKED (rli->t)) | |
3cf8b391 | 1209 | warning (OPT_Wattributes, "packed attribute is " |
1210 | "unnecessary for %q+D", field); | |
fca12917 | 1211 | } |
fca12917 | 1212 | } |
99f4e085 | 1213 | else |
1214 | rli->packed_maybe_necessary = 1; | |
1215 | } | |
f2cfea4a | 1216 | |
99f4e085 | 1217 | /* Does this field automatically have the alignment it needs by virtue |
b527cbf0 | 1218 | of the fields that precede it and the record's own alignment? */ |
1219 | if (known_align < desired_align) | |
99f4e085 | 1220 | { |
1221 | /* No, we need to skip space before this field. | |
1222 | Bump the cumulative size to a multiple of the field alignment. |
f2cfea4a | 1223 | |
b527cbf0 | 1224 | if (!targetm.ms_bitfield_layout_p (rli->t) |
1225 | && DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION) | |
d1251492 | 1226 | warning (OPT_Wpadded, "padding struct to align %q+D", field); |
fca12917 | 1227 | |
02e7a332 | 1228 | /* If the alignment is still within offset_align, just align |
1229 | the bit position. */ | |
1230 | if (desired_align < rli->offset_align) | |
1231 | rli->bitpos = round_up (rli->bitpos, desired_align); | |
99f4e085 | 1232 | else |
1233 | { | |
02e7a332 | 1234 | /* First adjust OFFSET by the partial bits, then align. */ |
1235 | rli->offset | |
1236 | = size_binop (PLUS_EXPR, rli->offset, | |
5d7ed6c7 | 1237 | fold_convert (sizetype, |
1238 | size_binop (CEIL_DIV_EXPR, rli->bitpos, | |
1239 | bitsize_unit_node))); | |
02e7a332 | 1240 | rli->bitpos = bitsize_zero_node; |
1241 | ||
1242 | rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT); | |
f2cfea4a | 1243 | } |
02e7a332 | 1244 | |
53de1faf | 1245 | if (! TREE_CONSTANT (rli->offset)) |
1246 | rli->offset_align = desired_align; | |
b527cbf0 | 1247 | if (targetm.ms_bitfield_layout_p (rli->t)) |
1248 | rli->prev_field = NULL; | |
99f4e085 | 1249 | } |
f2cfea4a | 1250 | |
02e7a332 | 1251 | /* Handle compatibility with PCC. Note that if the record has any |
1252 | variable-sized fields, we need not worry about compatibility. */ | |
f2cfea4a | 1253 | #ifdef PCC_BITFIELD_TYPE_MATTERS |
99f4e085 | 1254 | if (PCC_BITFIELD_TYPE_MATTERS |
6fb33aa0 | 1255 | && ! targetm.ms_bitfield_layout_p (rli->t) |
99f4e085 | 1256 | && TREE_CODE (field) == FIELD_DECL |
1257 | && type != error_mark_node | |
02e7a332 | 1258 | && DECL_BIT_FIELD (field) |
9fd767c5 | 1259 | && (! DECL_PACKED (field) |
1260 | /* Enter for these packed fields only to issue a warning. */ | |
1261 | || TYPE_ALIGN (type) <= BITS_PER_UNIT) | |
99f4e085 | 1262 | && maximum_field_alignment == 0 |
02e7a332 | 1263 | && ! integer_zerop (DECL_SIZE (field)) |
e913b5cd | 1264 | && tree_fits_uhwi_p (DECL_SIZE (field)) |
e913b5cd | 1265 | && tree_fits_uhwi_p (rli->offset) |
1266 | && tree_fits_uhwi_p (TYPE_SIZE (type))) | |
99f4e085 | 1267 | { |
1268 | unsigned int type_align = TYPE_ALIGN (type); | |
02e7a332 | 1269 | tree dsize = DECL_SIZE (field); |
e913b5cd | 1270 | HOST_WIDE_INT field_size = tree_to_uhwi (dsize); |
69c1cbfa | 1271 | HOST_WIDE_INT offset = tree_to_uhwi (rli->offset); |
e913b5cd | 1272 | HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos); |
99f4e085 | 1273 | |
77d0f168 | 1274 | #ifdef ADJUST_FIELD_ALIGN |
1275 | if (! TYPE_USER_ALIGN (type)) | |
1276 | type_align = ADJUST_FIELD_ALIGN (field, type_align); | |
1277 | #endif | |
1278 | ||
99f4e085 | 1279 | /* A bit field may not span more units of alignment of its type |
1280 | than its type itself. Advance to next boundary if necessary. */ | |
805e22b2 | 1281 | if (excess_unit_span (offset, bit_offset, field_size, type_align, type)) |
9fd767c5 | 1282 | { |
1283 | if (DECL_PACKED (field)) | |
1284 | { | |
7a6a48c9 | 1285 | if (warn_packed_bitfield_compat == 1) |
9fd767c5 | 1286 | inform |
1287 | (input_location, | |
bf776685 | 1288 | "offset of packed bit-field %qD has changed in GCC 4.4", |
9fd767c5 | 1289 | field); |
1290 | } | |
1291 | else | |
178825bb | 1292 | rli->bitpos = round_up (rli->bitpos, type_align); |
9fd767c5 | 1293 | } |
fced8f42 | 1294 | |
9fd767c5 | 1295 | if (! DECL_PACKED (field)) |
1296 | TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type); | |
99f4e085 | 1297 | } |
f2cfea4a | 1298 | #endif |
1299 | ||
f2cfea4a | 1300 | #ifdef BITFIELD_NBYTES_LIMITED |
99f4e085 | 1301 | if (BITFIELD_NBYTES_LIMITED |
6fb33aa0 | 1302 | && ! targetm.ms_bitfield_layout_p (rli->t) |
99f4e085 | 1303 | && TREE_CODE (field) == FIELD_DECL |
1304 | && type != error_mark_node | |
1305 | && DECL_BIT_FIELD_TYPE (field) | |
02e7a332 | 1306 | && ! DECL_PACKED (field) |
1307 | && ! integer_zerop (DECL_SIZE (field)) | |
e913b5cd | 1308 | && tree_fits_uhwi_p (DECL_SIZE (field)) |
08f4222b | 1309 | && tree_fits_uhwi_p (rli->offset) |
e913b5cd | 1310 | && tree_fits_uhwi_p (TYPE_SIZE (type))) |
99f4e085 | 1311 | { |
1312 | unsigned int type_align = TYPE_ALIGN (type); | |
02e7a332 | 1313 | tree dsize = DECL_SIZE (field); |
e913b5cd | 1314 | HOST_WIDE_INT field_size = tree_to_uhwi (dsize); |
69c1cbfa | 1315 | HOST_WIDE_INT offset = tree_to_uhwi (rli->offset); |
e913b5cd | 1316 | HOST_WIDE_INT bit_offset = tree_to_shwi (rli->bitpos); |
87994a83 | 1317 | |
77d0f168 | 1318 | #ifdef ADJUST_FIELD_ALIGN |
1319 | if (! TYPE_USER_ALIGN (type)) | |
1320 | type_align = ADJUST_FIELD_ALIGN (field, type_align); | |
1321 | #endif | |
1322 | ||
99f4e085 | 1323 | if (maximum_field_alignment != 0) |
1324 | type_align = MIN (type_align, maximum_field_alignment); | |
1325 | /* ??? This test is opposite the test in the containing if | |
1326 | statement, so this code is unreachable currently. */ | |
1327 | else if (DECL_PACKED (field)) | |
1328 | type_align = MIN (type_align, BITS_PER_UNIT); | |
1329 | ||
1330 | /* A bit field may not span the unit of alignment of its type. | |
1331 | Advance to next boundary if necessary. */ | |
805e22b2 | 1332 | if (excess_unit_span (offset, bit_offset, field_size, type_align, type)) |
02e7a332 | 1333 | rli->bitpos = round_up (rli->bitpos, type_align); |
fced8f42 | 1334 | |
4b387c35 | 1335 | TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type); |
99f4e085 | 1336 | } |
f2cfea4a | 1337 | #endif |
1338 | ||
8642f3d3 | 1339 | /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details. |
1340 | A subtlety: | |
1341 | When a bit field is inserted into a packed record, the whole | |
1342 | size of the underlying type is used by one or more same-size | |
805e22b2 | 1343 | adjacent bitfields. (That is, if it is long:3, 32 bits are |
8642f3d3 | 1344 | used in the record, and any additional adjacent long bitfields are |
1345 | packed into the same chunk of 32 bits. However, if the size | |
1346 | changes, a new field of that size is allocated.) In an unpacked | |
de132707 | 1347 | record, this is the same as using alignment, but not equivalent |
805e22b2 | 1348 | when packing. |
8642f3d3 | 1349 | |
de132707 | 1350 | Note: for compatibility, we use the type size, not the type alignment |
8642f3d3 | 1351 | to determine alignment, since that matches the documentation */ |
1352 | ||
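  /* For illustration only (a sketch assuming a 32-bit long and a 16-bit
     short on a target whose ABI uses the MS layout):

       struct s { long a : 3; long b : 3; short c : 3; };

     A and B share one 32-bit "long" allocation unit.  When C is reached
     its type size differs, so a fresh 16-bit unit is started for it
     instead of packing it into the bits left over from the long, and the
     struct ends up 8 bytes instead of the 4 the default layout would
     produce.  */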
7bd4091f | 1353 | if (targetm.ms_bitfield_layout_p (rli->t)) |
f04f096b | 1354 | { |
8642f3d3 | 1355 | tree prev_saved = rli->prev_field; |
8aea3a7e | 1356 | tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL; |
f04f096b | 1357 | |
7bd4091f | 1358 | /* rli->prev_field, if set, is always a bitfield. */ |
1359 | if (rli->prev_field) | |
8642f3d3 | 1360 | { |
1361 | /* If both are bitfields, nonzero, and the same size, this is | |
1362 | the middle of a run. Zero declared size fields are special | |
1363 | and handled as "end of run". (Note: it's nonzero declared | |
1364 | size, but equal type sizes!) (Since we know that both | |
1365 | the current and previous fields are bitfields by the | |
1366 | time we check it, DECL_SIZE must be present for both.) */ | |
1367 | if (DECL_BIT_FIELD_TYPE (field) | |
1368 | && !integer_zerop (DECL_SIZE (field)) | |
1369 | && !integer_zerop (DECL_SIZE (rli->prev_field)) | |
e913b5cd | 1370 | && tree_fits_shwi_p (DECL_SIZE (rli->prev_field)) |
69c1cbfa | 1371 | && tree_fits_uhwi_p (TYPE_SIZE (type)) |
8aea3a7e | 1372 | && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))) |
8642f3d3 | 1373 | { |
1374 | /* We're in the middle of a run of equal type size fields; make | |
1375 | sure we realign if we run out of bits. (Not decl size, | |
1376 | type size!) */ | |
e913b5cd | 1377 | HOST_WIDE_INT bitsize = tree_to_uhwi (DECL_SIZE (field)); |
8642f3d3 | 1378 | |
1379 | if (rli->remaining_in_alignment < bitsize) | |
1380 | { | |
e913b5cd | 1381 | HOST_WIDE_INT typesize = tree_to_uhwi (TYPE_SIZE (type)); |
8aea3a7e | 1382 | |
7bd4091f | 1383 | /* Out of bits; bump up to the next 'word'. */ |
7bd4091f | 1384 | rli->bitpos |
8aea3a7e | 1385 | = size_binop (PLUS_EXPR, rli->bitpos, |
1386 | bitsize_int (rli->remaining_in_alignment)); | |
7bd4091f | 1387 | rli->prev_field = field; |
8aea3a7e | 1388 | if (typesize < bitsize) |
1389 | rli->remaining_in_alignment = 0; | |
1390 | else | |
1391 | rli->remaining_in_alignment = typesize - bitsize; | |
8642f3d3 | 1392 | } |
8aea3a7e | 1393 | else |
1394 | rli->remaining_in_alignment -= bitsize; | |
8642f3d3 | 1395 | } |
1396 | else | |
1397 | { | |
805e22b2 | 1398 | /* End of a run: if leaving a run of bitfields of the same type |
1399 | size, we have to "use up" the rest of the bits of the type | |
8642f3d3 | 1400 | size. |
1401 | ||
1402 | Compute the new position as the sum of the size for the prior | |
1403 | type and where we first started working on that type. | |
1404 | Note: since the beginning of the field was aligned, the end |
1405 | will be too; no rounding is needed. */ |
1406 | ||
a6cf93fb | 1407 | if (!integer_zerop (DECL_SIZE (rli->prev_field))) |
8642f3d3 | 1408 | { |
7bd4091f | 1409 | rli->bitpos |
1410 | = size_binop (PLUS_EXPR, rli->bitpos, | |
1411 | bitsize_int (rli->remaining_in_alignment)); | |
8642f3d3 | 1412 | } |
1413 | else | |
5f1e9331 | 1414 | /* We "use up" size zero fields; the code below should behave |
1415 | as if the prior field was not a bitfield. */ | |
1416 | prev_saved = NULL; | |
8642f3d3 | 1417 | |
805e22b2 | 1418 | /* Cause a new bitfield to be captured, either this time (if |
9cb8e99f | 1419 | currently a bitfield) or next time we see one. */ |
9af5ce0c | 1420 | if (!DECL_BIT_FIELD_TYPE (field) |
a6cf93fb | 1421 | || integer_zerop (DECL_SIZE (field))) |
5f1e9331 | 1422 | rli->prev_field = NULL; |
8642f3d3 | 1423 | } |
5f1e9331 | 1424 | |
8642f3d3 | 1425 | normalize_rli (rli); |
1426 | } | |
1427 | ||
3157acc6 | 1428 | /* If we're starting a new run of same type size bitfields |
8642f3d3 | 1429 | (or a run of non-bitfields), set up the "first of the run" |
805e22b2 | 1430 | fields. |
8642f3d3 | 1431 | |
1432 | That is, if the current field is not a bitfield, or if there | |
1433 | was a prior bitfield and the type sizes differ, or if there wasn't |
1434 | a prior bitfield and the size of the current field is nonzero. |
1435 | ||
1436 | Note: we must be sure to test ONLY the type size if there was | |
1437 | a prior bitfield and ONLY for the current field being zero if | |
1438 | there wasn't. */ | |
1439 | ||
1440 | if (!DECL_BIT_FIELD_TYPE (field) | |
a6cf93fb | 1441 | || (prev_saved != NULL |
8aea3a7e | 1442 | ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)) |
5f1e9331 | 1443 | : !integer_zerop (DECL_SIZE (field)) )) |
8642f3d3 | 1444 | { |
5f1e9331 | 1445 | /* Never smaller than a byte for compatibility. */ |
1446 | unsigned int type_align = BITS_PER_UNIT; | |
8642f3d3 | 1447 | |
805e22b2 | 1448 | /* (When not a bitfield), we could be seeing a flex array (with |
8642f3d3 | 1449 | no DECL_SIZE). Since we won't be using remaining_in_alignment |
805e22b2 | 1450 | until we see a bitfield (and come by here again) we just skip |
8642f3d3 | 1451 | calculating it. */ |
5f1e9331 | 1452 | if (DECL_SIZE (field) != NULL |
e913b5cd | 1453 | && tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (field))) |
1454 | && tree_fits_uhwi_p (DECL_SIZE (field))) | |
8aea3a7e | 1455 | { |
c7e8d0da | 1456 | unsigned HOST_WIDE_INT bitsize |
e913b5cd | 1457 | = tree_to_uhwi (DECL_SIZE (field)); |
c7e8d0da | 1458 | unsigned HOST_WIDE_INT typesize |
e913b5cd | 1459 | = tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field))); |
8aea3a7e | 1460 | |
1461 | if (typesize < bitsize) | |
1462 | rli->remaining_in_alignment = 0; | |
1463 | else | |
1464 | rli->remaining_in_alignment = typesize - bitsize; | |
1465 | } | |
8642f3d3 | 1466 | |
9cb8e99f | 1467 | /* Now align (conventionally) for the new type. */ |
7bd4091f | 1468 | type_align = TYPE_ALIGN (TREE_TYPE (field)); |
f04f096b | 1469 | |
8642f3d3 | 1470 | if (maximum_field_alignment != 0) |
1471 | type_align = MIN (type_align, maximum_field_alignment); | |
f04f096b | 1472 | |
178825bb | 1473 | rli->bitpos = round_up (rli->bitpos, type_align); |
5f1e9331 | 1474 | |
8642f3d3 | 1475 | /* If we really aligned, don't allow subsequent bitfields |
9cb8e99f | 1476 | to undo that. */ |
8642f3d3 | 1477 | rli->prev_field = NULL; |
1478 | } | |
f04f096b | 1479 | } |
1480 | ||
02e7a332 | 1481 | /* Offset so far becomes the position of this field after normalizing. */ |
1482 | normalize_rli (rli); | |
1483 | DECL_FIELD_OFFSET (field) = rli->offset; | |
1484 | DECL_FIELD_BIT_OFFSET (field) = rli->bitpos; | |
b4bb829f | 1485 | SET_DECL_OFFSET_ALIGN (field, rli->offset_align); |
02e7a332 | 1486 | |
46515aeb | 1487 | /* Evaluate nonconstant offsets only once, either now or as soon as safe. */ |
1488 | if (TREE_CODE (DECL_FIELD_OFFSET (field)) != INTEGER_CST) | |
1489 | DECL_FIELD_OFFSET (field) = variable_size (DECL_FIELD_OFFSET (field)); | |
1490 | ||
02e7a332 | 1491 | /* If this field ended up more aligned than we thought it would be (we |
1492 | approximate this by seeing if its position changed), lay out the field | |
1493 | again; perhaps we can use an integral mode for it now. */ | |
62d2dc6f | 1494 | if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field))) |
e913b5cd | 1495 | actual_align = (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)) |
1496 | & - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field))); | |
62d2dc6f | 1497 | else if (integer_zerop (DECL_FIELD_OFFSET (field))) |
23325b33 | 1498 | actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align); |
e913b5cd | 1499 | else if (tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))) |
02e7a332 | 1500 | actual_align = (BITS_PER_UNIT |
e913b5cd | 1501 | * (tree_to_uhwi (DECL_FIELD_OFFSET (field)) |
1502 | & - tree_to_uhwi (DECL_FIELD_OFFSET (field)))); | |
99f4e085 | 1503 | else |
02e7a332 | 1504 | actual_align = DECL_OFFSET_ALIGN (field); |
23325b33 | 1505 | /* ACTUAL_ALIGN is still the actual alignment *within the record*. |
1506 | Store / extract bit field operations will check the alignment of the |
1507 | record against the mode of bit fields. */ |
02e7a332 | 1508 | |
1509 | if (known_align != actual_align) | |
1510 | layout_decl (field, actual_align); | |
1511 | ||
7bd4091f | 1512 | if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field)) |
1513 | rli->prev_field = field; | |
f04f096b | 1514 | |
02e7a332 | 1515 | /* Now add size of this field to the size of the record. If the size is |
1516 | not constant, treat the field as being a multiple of bytes and just | |
1517 | adjust the offset, resetting the bit position. Otherwise, apportion the | |
1518 | size amongst the bit position and offset. First handle the case of an | |
1519 | unspecified size, which can happen when we have an invalid nested struct | |
1520 | definition, such as struct j { struct j { int i; } }. The error message | |
1521 | is printed in finish_struct. */ | |
1522 | if (DECL_SIZE (field) == 0) | |
1523 | /* Do nothing. */; | |
7e50ecae | 1524 | else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST |
f96bd2bf | 1525 | || TREE_OVERFLOW (DECL_SIZE (field))) |
99f4e085 | 1526 | { |
02e7a332 | 1527 | rli->offset |
1528 | = size_binop (PLUS_EXPR, rli->offset, | |
5d7ed6c7 | 1529 | fold_convert (sizetype, |
1530 | size_binop (CEIL_DIV_EXPR, rli->bitpos, | |
1531 | bitsize_unit_node))); | |
02e7a332 | 1532 | rli->offset |
1533 | = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field)); | |
1534 | rli->bitpos = bitsize_zero_node; | |
fcf31ac6 | 1535 | rli->offset_align = MIN (rli->offset_align, desired_align); |
99f4e085 | 1536 | } |
7bd4091f | 1537 | else if (targetm.ms_bitfield_layout_p (rli->t)) |
1538 | { | |
1539 | rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field)); | |
1540 | ||
1541 | /* If we ended a bitfield before the full length of the type then | |
1542 | pad the struct out to the full length of the last type. */ | |
1767a056 | 1543 | if ((DECL_CHAIN (field) == NULL |
1544 | || TREE_CODE (DECL_CHAIN (field)) != FIELD_DECL) | |
7bd4091f | 1545 | && DECL_BIT_FIELD_TYPE (field) |
1546 | && !integer_zerop (DECL_SIZE (field))) | |
1547 | rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, | |
1548 | bitsize_int (rli->remaining_in_alignment)); | |
1549 | ||
1550 | normalize_rli (rli); | |
1551 | } | |
99f4e085 | 1552 | else |
1553 | { | |
02e7a332 | 1554 | rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field)); |
1555 | normalize_rli (rli); | |
f2cfea4a | 1556 | } |
99f4e085 | 1557 | } |
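/* A minimal sketch of how a caller is expected to drive the record
   layout machinery above (T, F1 and F2 are hypothetical trees; see the
   RECORD_TYPE case of layout_type below for the real loop):

     record_layout_info rli = start_record_layout (t);
     place_field (rli, f1);
     place_field (rli, f2);
     finish_record_layout (rli, true);  */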
f2cfea4a | 1558 | |
99f4e085 | 1559 | /* Assuming that all the fields have been laid out, this function uses |
1560 | RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type | |
de132707 | 1561 | indicated by RLI. */ |
f2cfea4a | 1562 | |
99f4e085 | 1563 | static void |
60b8c5b3 | 1564 | finalize_record_size (record_layout_info rli) |
99f4e085 | 1565 | { |
02e7a332 | 1566 | tree unpadded_size, unpadded_size_unit; |
1567 | ||
9ac9c432 | 1568 | /* Now we want just byte and bit offsets, so set the offset alignment |
1569 | to be a byte and then normalize. */ | |
1570 | rli->offset_align = BITS_PER_UNIT; | |
1571 | normalize_rli (rli); | |
f2cfea4a | 1572 | |
1573 | /* Determine the desired alignment. */ | |
1574 | #ifdef ROUND_TYPE_ALIGN | |
99f4e085 | 1575 | TYPE_ALIGN (rli->t) = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), |
fd5279fc | 1576 | rli->record_align); |
f2cfea4a | 1577 | #else |
99f4e085 | 1578 | TYPE_ALIGN (rli->t) = MAX (TYPE_ALIGN (rli->t), rli->record_align); |
f2cfea4a | 1579 | #endif |
1580 | ||
9ac9c432 | 1581 | /* Compute the size so far. Be sure to allow for extra bits in the |
1582 | size in bytes. We have guaranteed above that it will be no more | |
1583 | than a single byte. */ | |
1584 | unpadded_size = rli_size_so_far (rli); | |
1585 | unpadded_size_unit = rli_size_unit_so_far (rli); | |
1586 | if (! integer_zerop (rli->bitpos)) | |
1587 | unpadded_size_unit | |
1588 | = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node); | |
02e7a332 | 1589 | |
9aa78eb0 | 1590 | if (TREE_CODE (unpadded_size_unit) == INTEGER_CST |
1591 | && !TREE_OVERFLOW (unpadded_size_unit) | |
1592 | && !valid_constant_size_p (unpadded_size_unit)) | |
1593 | error ("type %qT is too large", rli->t); | |
1594 | ||
2358393e | 1595 | /* Round the size up to be a multiple of the required alignment. */ |
178825bb | 1596 | TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t)); |
d37625c0 | 1597 | TYPE_SIZE_UNIT (rli->t) |
178825bb | 1598 | = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t)); |
fe352cf1 | 1599 | |
6bf97f82 | 1600 | if (TREE_CONSTANT (unpadded_size) |
d1251492 | 1601 | && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0 |
1602 | && input_location != BUILTINS_LOCATION) | |
6bf97f82 | 1603 | warning (OPT_Wpadded, "padding struct size to alignment boundary"); |
40734805 | 1604 | |
02e7a332 | 1605 | if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE |
1606 | && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary | |
1607 | && TREE_CONSTANT (unpadded_size)) | |
fca12917 | 1608 | { |
1609 | tree unpacked_size; | |
fe352cf1 | 1610 | |
fca12917 | 1611 | #ifdef ROUND_TYPE_ALIGN |
99f4e085 | 1612 | rli->unpacked_align |
1613 | = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align); | |
fca12917 | 1614 | #else |
99f4e085 | 1615 | rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align); |
fca12917 | 1616 | #endif |
02e7a332 | 1617 | |
178825bb | 1618 | unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align); |
99f4e085 | 1619 | if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t))) |
fca12917 | 1620 | { |
99f4e085 | 1621 | if (TYPE_NAME (rli->t)) |
fca12917 | 1622 | { |
abd3e6b5 | 1623 | tree name; |
fe352cf1 | 1624 | |
99f4e085 | 1625 | if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE) |
abd3e6b5 | 1626 | name = TYPE_NAME (rli->t); |
fca12917 | 1627 | else |
abd3e6b5 | 1628 | name = DECL_NAME (TYPE_NAME (rli->t)); |
02e7a332 | 1629 | |
fca12917 | 1630 | if (STRICT_ALIGNMENT) |
6bf97f82 | 1631 | warning (OPT_Wpacked, "packed attribute causes inefficient " |
abd3e6b5 | 1632 | "alignment for %qE", name); |
fca12917 | 1633 | else |
6bf97f82 | 1634 | warning (OPT_Wpacked, |
abd3e6b5 | 1635 | "packed attribute is unnecessary for %qE", name); |
fca12917 | 1636 | } |
1637 | else | |
1638 | { | |
1639 | if (STRICT_ALIGNMENT) | |
6bf97f82 | 1640 | warning (OPT_Wpacked, |
9b2d6d13 | 1641 | "packed attribute causes inefficient alignment"); |
fca12917 | 1642 | else |
6bf97f82 | 1643 | warning (OPT_Wpacked, "packed attribute is unnecessary"); |
fca12917 | 1644 | } |
1645 | } | |
fca12917 | 1646 | } |
99f4e085 | 1647 | } |
1648 | ||
1649 | /* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE). */ | |
f2cfea4a | 1650 | |
9ac9c432 | 1651 | void |
60b8c5b3 | 1652 | compute_record_mode (tree type) |
99f4e085 | 1653 | { |
02e7a332 | 1654 | tree field; |
1655 | enum machine_mode mode = VOIDmode; | |
1656 | ||
99f4e085 | 1657 | /* Most RECORD_TYPEs have BLKmode, so we start off assuming that. |
1658 | However, if possible, we use a mode that fits in a register | |
1659 | instead, in order to allow for better optimization down the | |
1660 | line. */ | |
342ad2d6 | 1661 | SET_TYPE_MODE (type, BLKmode); |
99f4e085 | 1662 | |
e913b5cd | 1663 | if (! tree_fits_uhwi_p (TYPE_SIZE (type))) |
02e7a332 | 1664 | return; |
99f4e085 | 1665 | |
02e7a332 | 1666 | /* A record which has any BLKmode members must itself be |
1667 | BLKmode; it can't go in a register. Unless the member is | |
1668 | BLKmode only because it isn't aligned. */ | |
1767a056 | 1669 | for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) |
02e7a332 | 1670 | { |
02e7a332 | 1671 | if (TREE_CODE (field) != FIELD_DECL) |
1672 | continue; | |
99f4e085 | 1673 | |
02e7a332 | 1674 | if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK |
1675 | || (TYPE_MODE (TREE_TYPE (field)) == BLKmode | |
0e9fefce | 1676 | && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field)) |
1677 | && !(TYPE_SIZE (TREE_TYPE (field)) != 0 | |
1678 | && integer_zerop (TYPE_SIZE (TREE_TYPE (field))))) | |
e913b5cd | 1679 | || ! tree_fits_uhwi_p (bit_position (field)) |
0b241033 | 1680 | || DECL_SIZE (field) == 0 |
e913b5cd | 1681 | || ! tree_fits_uhwi_p (DECL_SIZE (field))) |
02e7a332 | 1682 | return; |
1683 | ||
02e7a332 | 1684 | /* If this field is the whole struct, remember its mode so |
1685 | that, say, we can put a double in a class into a DF | |
b708a05c | 1686 | register instead of forcing it to live in the stack. */ |
1687 | if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field))) | |
02e7a332 | 1688 | mode = DECL_MODE (field); |
99f4e085 | 1689 | |
f91ed644 | 1690 | /* With some targets, it is sub-optimal to access an aligned |
1691 | BLKmode structure as a scalar. */ | |
1692 | if (targetm.member_type_forces_blk (field, mode)) | |
02e7a332 | 1693 | return; |
02e7a332 | 1694 | } |
99f4e085 | 1695 | |
aedd07a7 | 1696 | /* If we only have one real field, use its mode if that mode's size |
1697 | matches the type's size. This only applies to RECORD_TYPE. This | |
1698 | does not apply to unions. */ | |
1699 | if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode | |
e913b5cd | 1700 | && tree_fits_uhwi_p (TYPE_SIZE (type)) |
1701 | && GET_MODE_BITSIZE (mode) == tree_to_uhwi (TYPE_SIZE (type))) | |
342ad2d6 | 1702 | SET_TYPE_MODE (type, mode); |
c0d93be8 | 1703 | else |
342ad2d6 | 1704 | SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1)); |
02e7a332 | 1705 | |
1706 | /* If structure's known alignment is less than what the scalar | |
1707 | mode would need, and it matters, then stick with BLKmode. */ | |
1708 | if (TYPE_MODE (type) != BLKmode | |
1709 | && STRICT_ALIGNMENT | |
1710 | && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT | |
1711 | || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (TYPE_MODE (type)))) | |
1712 | { | |
1713 | /* If this is the only reason this type is BLKmode, then | |
1714 | don't force containing types to be BLKmode. */ | |
1715 | TYPE_NO_FORCE_BLK (type) = 1; | |
342ad2d6 | 1716 | SET_TYPE_MODE (type, BLKmode); |
99f4e085 | 1717 | } |
f2cfea4a | 1718 | } |
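/* For illustration only (a sketch assuming double has 64-bit DFmode): a
   record such as

     struct wrapper { double d; };

   has a single field whose size equals the size of the whole structure,
   so the loop above remembers DFmode and the struct itself is normally
   given DFmode rather than BLKmode, allowing values of this type to
   live in a floating-point register.  */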
99f4e085 | 1719 | |
1720 | /* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid | |
1721 | out. */ | |
1722 | ||
1723 | static void | |
60b8c5b3 | 1724 | finalize_type_size (tree type) |
99f4e085 | 1725 | { |
1726 | /* Normally, use the alignment corresponding to the mode chosen. | |
1727 | However, where strict alignment is not required, avoid | |
1728 | over-aligning structures, since most compilers do not do this | |
d1b5d503 | 1729 | alignment. */ |
99f4e085 | 1730 | |
1731 | if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode | |
d1b5d503 | 1732 | && (STRICT_ALIGNMENT |
99f4e085 | 1733 | || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE |
1734 | && TREE_CODE (type) != QUAL_UNION_TYPE | |
1735 | && TREE_CODE (type) != ARRAY_TYPE))) | |
aca14577 | 1736 | { |
d1b5d503 | 1737 | unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type)); |
1738 | ||
1739 | /* Don't override a larger alignment requirement coming from a user | |
1740 | alignment of one of the fields. */ | |
1741 | if (mode_align >= TYPE_ALIGN (type)) | |
1742 | { | |
1743 | TYPE_ALIGN (type) = mode_align; | |
1744 | TYPE_USER_ALIGN (type) = 0; | |
1745 | } | |
aca14577 | 1746 | } |
99f4e085 | 1747 | |
1748 | /* Do machine-dependent extra alignment. */ | |
1749 | #ifdef ROUND_TYPE_ALIGN | |
1750 | TYPE_ALIGN (type) | |
1751 | = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT); | |
1752 | #endif | |
1753 | ||
99f4e085 | 1754 | /* If we failed to find a simple way to calculate the unit size |
02e7a332 | 1755 | of the type, find it by division. */ |
99f4e085 | 1756 | if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0) |
1757 | /* TYPE_SIZE (type) is computed in bitsizetype. After the division, the | |
1758 | result will fit in sizetype. We will get more efficient code using | |
1759 | sizetype, so we force a conversion. */ | |
1760 | TYPE_SIZE_UNIT (type) | |
5d7ed6c7 | 1761 | = fold_convert (sizetype, |
1762 | size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type), | |
1763 | bitsize_unit_node)); | |
99f4e085 | 1764 | |
02e7a332 | 1765 | if (TYPE_SIZE (type) != 0) |
1766 | { | |
178825bb | 1767 | TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type)); |
1768 | TYPE_SIZE_UNIT (type) | |
1769 | = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type)); | |
02e7a332 | 1770 | } |
1771 | ||
1772 | /* Evaluate nonconstant sizes only once, either now or as soon as safe. */ | |
1773 | if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST) | |
1774 | TYPE_SIZE (type) = variable_size (TYPE_SIZE (type)); | |
99f4e085 | 1775 | if (TYPE_SIZE_UNIT (type) != 0 |
1776 | && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST) | |
1777 | TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type)); | |
1778 | ||
1779 | /* Also layout any other variants of the type. */ | |
1780 | if (TYPE_NEXT_VARIANT (type) | |
1781 | || type != TYPE_MAIN_VARIANT (type)) | |
1782 | { | |
1783 | tree variant; | |
1784 | /* Record layout info of this variant. */ | |
1785 | tree size = TYPE_SIZE (type); | |
1786 | tree size_unit = TYPE_SIZE_UNIT (type); | |
1787 | unsigned int align = TYPE_ALIGN (type); | |
aca14577 | 1788 | unsigned int user_align = TYPE_USER_ALIGN (type); |
99f4e085 | 1789 | enum machine_mode mode = TYPE_MODE (type); |
1790 | ||
1791 | /* Copy it into all variants. */ | |
1792 | for (variant = TYPE_MAIN_VARIANT (type); | |
1793 | variant != 0; | |
1794 | variant = TYPE_NEXT_VARIANT (variant)) | |
1795 | { | |
1796 | TYPE_SIZE (variant) = size; | |
1797 | TYPE_SIZE_UNIT (variant) = size_unit; | |
1798 | TYPE_ALIGN (variant) = align; | |
aca14577 | 1799 | TYPE_USER_ALIGN (variant) = user_align; |
342ad2d6 | 1800 | SET_TYPE_MODE (variant, mode); |
99f4e085 | 1801 | } |
1802 | } | |
1803 | } | |
1804 | ||
8d8a34f9 | 1805 | /* Return a new underlying object for a bitfield started with FIELD. */ |
1806 | ||
1807 | static tree | |
1808 | start_bitfield_representative (tree field) | |
1809 | { | |
1810 | tree repr = make_node (FIELD_DECL); | |
1811 | DECL_FIELD_OFFSET (repr) = DECL_FIELD_OFFSET (field); | |
1812 | /* Force the representative to begin at a BITS_PER_UNIT aligned | |
1813 | boundary - C++ may use tail-padding of a base object to | |
1814 | continue packing bits so the bitfield region does not start | |
1815 | at bit zero (see g++.dg/abi/bitfield5.C for example). | |
1816 | Unallocated bits may happen for other reasons as well, | |
1817 | for example Ada which allows explicit bit-granular structure layout. */ | |
1818 | DECL_FIELD_BIT_OFFSET (repr) | |
1819 | = size_binop (BIT_AND_EXPR, | |
1820 | DECL_FIELD_BIT_OFFSET (field), | |
1821 | bitsize_int (~(BITS_PER_UNIT - 1))); | |
1822 | SET_DECL_OFFSET_ALIGN (repr, DECL_OFFSET_ALIGN (field)); | |
1823 | DECL_SIZE (repr) = DECL_SIZE (field); | |
1824 | DECL_SIZE_UNIT (repr) = DECL_SIZE_UNIT (field); | |
1825 | DECL_PACKED (repr) = DECL_PACKED (field); | |
1826 | DECL_CONTEXT (repr) = DECL_CONTEXT (field); | |
1827 | return repr; | |
1828 | } | |
1829 | ||
1830 | /* Finish up a bitfield group that was started by creating the underlying | |
1831 | object REPR with the last field in the bitfield group FIELD. */ | |
1832 | ||
1833 | static void | |
1834 | finish_bitfield_representative (tree repr, tree field) | |
1835 | { | |
1836 | unsigned HOST_WIDE_INT bitsize, maxbitsize; | |
1837 | enum machine_mode mode; | |
1838 | tree nextf, size; | |
1839 | ||
1840 | size = size_diffop (DECL_FIELD_OFFSET (field), | |
1841 | DECL_FIELD_OFFSET (repr)); | |
e913b5cd | 1842 | gcc_assert (tree_fits_uhwi_p (size)); |
1843 | bitsize = (tree_to_uhwi (size) * BITS_PER_UNIT | |
1844 | + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (field)) | |
1845 | - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr)) | |
1846 | + tree_to_uhwi (DECL_SIZE (field))); | |
8d8a34f9 | 1847 | |
75188dc6 | 1848 | /* Round up bitsize to multiples of BITS_PER_UNIT. */ |
1849 | bitsize = (bitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1); | |
1850 | ||
8d8a34f9 | 1851 | /* Now nothing tells us how to pad out bitsize ... */ |
1852 | nextf = DECL_CHAIN (field); | |
1853 | while (nextf && TREE_CODE (nextf) != FIELD_DECL) | |
1854 | nextf = DECL_CHAIN (nextf); | |
1855 | if (nextf) | |
1856 | { | |
1857 | tree maxsize; | |
9d75589a | 1858 | /* If there was an error, the field may not be laid out |
8d8a34f9 | 1859 | correctly. Don't bother to do anything. */ |
1860 | if (TREE_TYPE (nextf) == error_mark_node) | |
1861 | return; | |
1862 | maxsize = size_diffop (DECL_FIELD_OFFSET (nextf), | |
1863 | DECL_FIELD_OFFSET (repr)); | |
e913b5cd | 1864 | if (tree_fits_uhwi_p (maxsize)) |
fa42e1a4 | 1865 | { |
e913b5cd | 1866 | maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT |
1867 | + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (nextf)) | |
1868 | - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))); | |
fa42e1a4 | 1869 | /* If the group ends within a bitfield, nextf does not need to be |
1870 | aligned to BITS_PER_UNIT. Thus round up. */ | |
1871 | maxbitsize = (maxbitsize + BITS_PER_UNIT - 1) & ~(BITS_PER_UNIT - 1); | |
1872 | } | |
1873 | else | |
1874 | maxbitsize = bitsize; | |
8d8a34f9 | 1875 | } |
1876 | else | |
1877 | { | |
1878 | /* ??? If you consider that tail-padding of this struct might be | |
1879 | re-used when deriving from it we cannot really do the following | |
75188dc6 | 1880 | and thus need to set maxsize to bitsize? Also we cannot |
1881 | generally rely on maxsize to fold to an integer constant, so | |
1882 | use bitsize as fallback for this case. */ | |
8d8a34f9 | 1883 | tree maxsize = size_diffop (TYPE_SIZE_UNIT (DECL_CONTEXT (field)), |
1884 | DECL_FIELD_OFFSET (repr)); | |
e913b5cd | 1885 | if (tree_fits_uhwi_p (maxsize)) |
1886 | maxbitsize = (tree_to_uhwi (maxsize) * BITS_PER_UNIT | |
1887 | - tree_to_uhwi (DECL_FIELD_BIT_OFFSET (repr))); | |
75188dc6 | 1888 | else |
1889 | maxbitsize = bitsize; | |
8d8a34f9 | 1890 | } |
1891 | ||
1892 | /* Only if we don't artificially break up the representative in | |
1893 | the middle of a large bitfield with different possibly | |
1894 | overlapping representatives. And all representatives start | |
1895 | at byte offset. */ | |
1896 | gcc_assert (maxbitsize % BITS_PER_UNIT == 0); | |
1897 | ||
8d8a34f9 | 1898 | /* Find the smallest nice mode to use. */ |
1899 | for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode; | |
1900 | mode = GET_MODE_WIDER_MODE (mode)) | |
1901 | if (GET_MODE_BITSIZE (mode) >= bitsize) | |
1902 | break; | |
1903 | if (mode != VOIDmode | |
1904 | && (GET_MODE_BITSIZE (mode) > maxbitsize | |
1905 | || GET_MODE_BITSIZE (mode) > MAX_FIXED_MODE_SIZE)) | |
1906 | mode = VOIDmode; | |
1907 | ||
1908 | if (mode == VOIDmode) | |
1909 | { | |
1910 | /* We really want a BLKmode representative only as a last resort, | |
1911 | considering the member b in | |
1912 | struct { int a : 7; int b : 17; int c; } __attribute__((packed)); | |
1913 | Otherwise we simply want to split the representative up | |
1914 | allowing for overlaps within the bitfield region as required for | |
1915 | struct { int a : 7; int b : 7; | |
1916 | int c : 10; int d; } __attribute__((packed)); | |
1917 | [0, 15] HImode for a and b, [8, 23] HImode for c. */ | |
1918 | DECL_SIZE (repr) = bitsize_int (bitsize); | |
1919 | DECL_SIZE_UNIT (repr) = size_int (bitsize / BITS_PER_UNIT); | |
1920 | DECL_MODE (repr) = BLKmode; | |
1921 | TREE_TYPE (repr) = build_array_type_nelts (unsigned_char_type_node, | |
1922 | bitsize / BITS_PER_UNIT); | |
1923 | } | |
1924 | else | |
1925 | { | |
1926 | unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (mode); | |
1927 | DECL_SIZE (repr) = bitsize_int (modesize); | |
1928 | DECL_SIZE_UNIT (repr) = size_int (modesize / BITS_PER_UNIT); | |
1929 | DECL_MODE (repr) = mode; | |
1930 | TREE_TYPE (repr) = lang_hooks.types.type_for_mode (mode, 1); | |
1931 | } | |
1932 | ||
1933 | /* Remember whether the bitfield group is at the end of the | |
1934 | structure or not. */ | |
1935 | DECL_CHAIN (repr) = nextf; | |
1936 | } | |
1937 | ||
1938 | /* Compute and set FIELD_DECLs for the underlying objects we should | |
9d75589a | 1939 | use for bitfield access for the structure laid out with RLI. */ |
8d8a34f9 | 1940 | |
1941 | static void | |
1942 | finish_bitfield_layout (record_layout_info rli) | |
1943 | { | |
1944 | tree field, prev; | |
1945 | tree repr = NULL_TREE; | |
1946 | ||
1947 | /* Unions would be special: for the ease of type-punning optimizations |
1948 | we could use the underlying type as a hint for the representative |
1949 | if the bitfield would fit and the representative would not exceed | |
1950 | the union in size. */ | |
1951 | if (TREE_CODE (rli->t) != RECORD_TYPE) | |
1952 | return; | |
1953 | ||
1954 | for (prev = NULL_TREE, field = TYPE_FIELDS (rli->t); | |
1955 | field; field = DECL_CHAIN (field)) | |
1956 | { | |
1957 | if (TREE_CODE (field) != FIELD_DECL) | |
1958 | continue; | |
1959 | ||
1960 | /* In the C++ memory model, consecutive bit fields in a structure are | |
1961 | considered one memory location and updating a memory location | |
1962 | may not store into adjacent memory locations. */ | |
1963 | if (!repr | |
1964 | && DECL_BIT_FIELD_TYPE (field)) | |
1965 | { | |
1966 | /* Start new representative. */ | |
1967 | repr = start_bitfield_representative (field); | |
1968 | } | |
1969 | else if (repr | |
1970 | && ! DECL_BIT_FIELD_TYPE (field)) | |
1971 | { | |
1972 | /* Finish off new representative. */ | |
1973 | finish_bitfield_representative (repr, prev); | |
1974 | repr = NULL_TREE; | |
1975 | } | |
1976 | else if (DECL_BIT_FIELD_TYPE (field)) | |
1977 | { | |
fa42e1a4 | 1978 | gcc_assert (repr != NULL_TREE); |
1979 | ||
8d8a34f9 | 1980 | /* Zero-size bitfields finish off a representative and |
1981 | do not have a representative themselves. This is | |
1982 | required by the C++ memory model. */ | |
1983 | if (integer_zerop (DECL_SIZE (field))) | |
1984 | { | |
1985 | finish_bitfield_representative (repr, prev); | |
1986 | repr = NULL_TREE; | |
1987 | } | |
fa42e1a4 | 1988 | |
1989 | /* We assume that either DECL_FIELD_OFFSET of the representative | |
1990 | and each bitfield member is a constant or they are equal. | |
1991 | This is because we need to be able to compute the bit-offset | |
1992 | of each field relative to the representative in get_bit_range | |
1993 | during RTL expansion. | |
1994 | If these constraints are not met, simply force a new | |
1995 | representative to be generated. That will at most | |
1996 | generate worse code but still maintain correctness with | |
1997 | respect to the C++ memory model. */ | |
e913b5cd | 1998 | else if (!((tree_fits_uhwi_p (DECL_FIELD_OFFSET (repr)) |
1999 | && tree_fits_uhwi_p (DECL_FIELD_OFFSET (field))) | |
fa42e1a4 | 2000 | || operand_equal_p (DECL_FIELD_OFFSET (repr), |
2001 | DECL_FIELD_OFFSET (field), 0))) | |
2002 | { | |
2003 | finish_bitfield_representative (repr, prev); | |
2004 | repr = start_bitfield_representative (field); | |
2005 | } | |
8d8a34f9 | 2006 | } |
2007 | else | |
2008 | continue; | |
2009 | ||
2010 | if (repr) | |
2011 | DECL_BIT_FIELD_REPRESENTATIVE (field) = repr; | |
2012 | ||
2013 | prev = field; | |
2014 | } | |
2015 | ||
2016 | if (repr) | |
2017 | finish_bitfield_representative (repr, prev); | |
2018 | } | |
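/* For illustration only (a sketch assuming a 32-bit int): in

     struct s { int a : 7; int b : 17; int c; };

   A and B form one bitfield group and share a single 32-bit (SImode)
   DECL_BIT_FIELD_REPRESENTATIVE covering the first four bytes, while C
   is a distinct memory location.  A store to A or B may therefore
   read-modify-write that word, but must never touch C, as the C++11
   memory model requires.  */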
2019 | ||
99f4e085 | 2020 | /* Do all of the work required to layout the type indicated by RLI, |
2021 | once the fields have been laid out. This function will call `free' | |
23ed74d8 | 2022 | for RLI, unless FREE_P is false. Passing false for FREE_P is bad |
2023 | practice; this option only exists to support the |
2024 | G++ 3.2 ABI. */ | |
99f4e085 | 2025 | |
2026 | void | |
60b8c5b3 | 2027 | finish_record_layout (record_layout_info rli, int free_p) |
99f4e085 | 2028 | { |
2bc7da70 | 2029 | tree variant; |
2030 | ||
02e7a332 | 2031 | /* Compute the final size. */ |
2032 | finalize_record_size (rli); | |
2033 | ||
2034 | /* Compute the TYPE_MODE for the record. */ | |
2035 | compute_record_mode (rli->t); | |
83675f44 | 2036 | |
48fdacd0 | 2037 | /* Perform any last tweaks to the TYPE_SIZE, etc. */ |
2038 | finalize_type_size (rli->t); | |
2039 | ||
8d8a34f9 | 2040 | /* Compute bitfield representatives. */ |
2041 | finish_bitfield_layout (rli); | |
2042 | ||
2bc7da70 | 2043 | /* Propagate TYPE_PACKED to variants. With C++ templates, |
2044 | handle_packed_attribute is too early to do this. */ | |
2045 | for (variant = TYPE_NEXT_VARIANT (rli->t); variant; | |
2046 | variant = TYPE_NEXT_VARIANT (variant)) | |
2047 | TYPE_PACKED (variant) = TYPE_PACKED (rli->t); | |
2048 | ||
99f4e085 | 2049 | /* Lay out any static members. This is done now because their type |
2050 | may use the record's type. */ | |
f1f41a6c | 2051 | while (!vec_safe_is_empty (rli->pending_statics)) |
2052 | layout_decl (rli->pending_statics->pop (), 0); | |
83675f44 | 2053 | |
99f4e085 | 2054 | /* Clean up. */ |
23ed74d8 | 2055 | if (free_p) |
364ba361 | 2056 | { |
f1f41a6c | 2057 | vec_free (rli->pending_statics); |
364ba361 | 2058 | free (rli); |
2059 | } | |
99f4e085 | 2060 | } |
f2cfea4a | 2061 | \f |
805e22b2 | 2062 | |
2063 | /* Finish processing a builtin RECORD_TYPE type TYPE. Its name is |
2064 | NAME, its fields are chained in reverse on FIELDS. | |
2065 | ||
2066 | If ALIGN_TYPE is non-null, it is given the same alignment as | |
2067 | ALIGN_TYPE. */ | |
2068 | ||
2069 | void | |
60b8c5b3 | 2070 | finish_builtin_struct (tree type, const char *name, tree fields, |
2071 | tree align_type) | |
805e22b2 | 2072 | { |
f2332b21 | 2073 | tree tail, next; |
805e22b2 | 2074 | |
2075 | for (tail = NULL_TREE; fields; tail = fields, fields = next) | |
2076 | { | |
2077 | DECL_FIELD_CONTEXT (fields) = type; | |
1767a056 | 2078 | next = DECL_CHAIN (fields); |
2079 | DECL_CHAIN (fields) = tail; | |
805e22b2 | 2080 | } |
2081 | TYPE_FIELDS (type) = tail; | |
2082 | ||
2083 | if (align_type) | |
2084 | { | |
2085 | TYPE_ALIGN (type) = TYPE_ALIGN (align_type); | |
2086 | TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type); | |
2087 | } | |
2088 | ||
2089 | layout_type (type); | |
2090 | #if 0 /* not yet, should get fixed properly later */ | |
2091 | TYPE_NAME (type) = make_type_decl (get_identifier (name), type); | |
2092 | #else | |
e60a6f7b | 2093 | TYPE_NAME (type) = build_decl (BUILTINS_LOCATION, |
2094 | TYPE_DECL, get_identifier (name), type); | |
805e22b2 | 2095 | #endif |
2096 | TYPE_STUB_DECL (type) = TYPE_NAME (type); | |
2097 | layout_decl (TYPE_NAME (type), 0); | |
2098 | } | |
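/* A minimal usage sketch for finish_builtin_struct (the field names and
   the type name "__my_pair" are hypothetical; note the fields are
   chained in reverse, last field first, as the function expects):

     tree f_x = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                            get_identifier ("x"), integer_type_node);
     tree f_y = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                            get_identifier ("y"), integer_type_node);
     DECL_CHAIN (f_y) = f_x;
     tree pair = make_node (RECORD_TYPE);
     finish_builtin_struct (pair, "__my_pair", f_y, NULL_TREE);  */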
2099 | ||
f2cfea4a | 2100 | /* Calculate the mode, size, and alignment for TYPE. |
2101 | For an array type, calculate the element separation as well. | |
2102 | Record TYPE on the chain of permanent or temporary types | |
2103 | so that dbxout will find out about it. | |
2104 | ||
2105 | TYPE_SIZE of a type is nonzero if the type has been laid out already. | |
2106 | layout_type does nothing on such a type. | |
2107 | ||
2108 | If the type is incomplete, its TYPE_SIZE remains zero. */ | |
2109 | ||
2110 | void | |
60b8c5b3 | 2111 | layout_type (tree type) |
f2cfea4a | 2112 | { |
04e579b6 | 2113 | gcc_assert (type); |
f2cfea4a | 2114 | |
4ee9c684 | 2115 | if (type == error_mark_node) |
2116 | return; | |
2117 | ||
f2cfea4a | 2118 | /* Do nothing if type has been laid out before. */ |
2119 | if (TYPE_SIZE (type)) | |
2120 | return; | |
2121 | ||
f2cfea4a | 2122 | switch (TREE_CODE (type)) |
2123 | { | |
2124 | case LANG_TYPE: | |
2125 | /* This kind of type is the responsibility | |
c3418f42 | 2126 | of the language-specific code. */ |
04e579b6 | 2127 | gcc_unreachable (); |
f2cfea4a | 2128 | |
0e3dfadd | 2129 | case BOOLEAN_TYPE: |
f2cfea4a | 2130 | case INTEGER_TYPE: |
2131 | case ENUMERAL_TYPE: | |
342ad2d6 | 2132 | SET_TYPE_MODE (type, |
2133 | smallest_mode_for_size (TYPE_PRECISION (type), MODE_INT)); | |
b278476e | 2134 | TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type))); |
cec6c892 | 2135 | TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type))); |
f2cfea4a | 2136 | break; |
2137 | ||
2138 | case REAL_TYPE: | |
342ad2d6 | 2139 | SET_TYPE_MODE (type, |
2140 | mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0)); | |
b278476e | 2141 | TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type))); |
cec6c892 | 2142 | TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type))); |
f2cfea4a | 2143 | break; |
2144 | ||
06f0b99c | 2145 | case FIXED_POINT_TYPE: |
2146 | /* TYPE_MODE (type) has been set already. */ | |
2147 | TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type))); | |
2148 | TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type))); | |
2149 | break; | |
2150 | ||
f2cfea4a | 2151 | case COMPLEX_TYPE: |
78a8ed03 | 2152 | TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type)); |
342ad2d6 | 2153 | SET_TYPE_MODE (type, |
2154 | mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)), | |
2155 | (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE | |
2156 | ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT), | |
2157 | 0)); | |
b278476e | 2158 | TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type))); |
cec6c892 | 2159 | TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type))); |
f2cfea4a | 2160 | break; |
2161 | ||
8a95ab85 | 2162 | case VECTOR_TYPE: |
83e2a11b | 2163 | { |
2164 | int nunits = TYPE_VECTOR_SUBPARTS (type); | |
83e2a11b | 2165 | tree innertype = TREE_TYPE (type); |
2166 | ||
04e579b6 | 2167 | gcc_assert (!(nunits & (nunits - 1))); |
83e2a11b | 2168 | |
2169 | /* Find an appropriate mode for the vector type. */ | |
2170 | if (TYPE_MODE (type) == VOIDmode) | |
c4740c5d | 2171 | SET_TYPE_MODE (type, |
2172 | mode_for_vector (TYPE_MODE (innertype), nunits)); | |
83e2a11b | 2173 | |
06f0b99c | 2174 | TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type)); |
83e2a11b | 2175 | TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type)); |
2176 | TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR, | |
2177 | TYPE_SIZE_UNIT (innertype), | |
317e2a67 | 2178 | size_int (nunits)); |
83e2a11b | 2179 | TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype), |
317e2a67 | 2180 | bitsize_int (nunits)); |
b74c835a | 2181 | |
482a44fa | 2182 | /* For vector types, we do not default to the mode's alignment. |
2183 | Instead, query a target hook, defaulting to natural alignment. | |
2184 | This prevents ABI changes depending on whether or not native | |
2185 | vector modes are supported. */ | |
2186 | TYPE_ALIGN (type) = targetm.vector_alignment (type); | |
2187 | ||
2188 | /* However, if the underlying mode requires a bigger alignment than | |
2189 | what the target hook provides, we cannot use the mode. For now, | |
2190 | simply reject that case. */ | |
2191 | gcc_assert (TYPE_ALIGN (type) | |
2192 | >= GET_MODE_ALIGNMENT (TYPE_MODE (type))); | |
83e2a11b | 2193 | break; |
2194 | } | |
8a95ab85 | 2195 | |
f2cfea4a | 2196 | case VOID_TYPE: |
02e7a332 | 2197 | /* This is an incomplete type and so doesn't have a size. */ |
f2cfea4a | 2198 | TYPE_ALIGN (type) = 1; |
aca14577 | 2199 | TYPE_USER_ALIGN (type) = 0; |
342ad2d6 | 2200 | SET_TYPE_MODE (type, VOIDmode); |
f2cfea4a | 2201 | break; |
2202 | ||
e23958d4 | 2203 | case OFFSET_TYPE: |
b278476e | 2204 | TYPE_SIZE (type) = bitsize_int (POINTER_SIZE); |
cec6c892 | 2205 | TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT); |
60bf59a4 | 2206 | /* A pointer might be MODE_PARTIAL_INT, |
2207 | but ptrdiff_t must be integral. */ | |
342ad2d6 | 2208 | SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0)); |
98155838 | 2209 | TYPE_PRECISION (type) = POINTER_SIZE; |
e23958d4 | 2210 | break; |
2211 | ||
f2cfea4a | 2212 | case FUNCTION_TYPE: |
2213 | case METHOD_TYPE: | |
4812cab0 | 2214 | /* It's hard to see what the mode and size of a function ought to |
2215 | be, but we do know the alignment is FUNCTION_BOUNDARY, so | |
2216 | make it consistent with that. */ | |
342ad2d6 | 2217 | SET_TYPE_MODE (type, mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0)); |
4812cab0 | 2218 | TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY); |
2219 | TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT); | |
f2cfea4a | 2220 | break; |
2221 | ||
2222 | case POINTER_TYPE: | |
2223 | case REFERENCE_TYPE: | |
f1986931 | 2224 | { |
98155838 | 2225 | enum machine_mode mode = TYPE_MODE (type); |
2226 | if (TREE_CODE (type) == REFERENCE_TYPE && reference_types_internal) | |
2227 | { | |
2228 | addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (type)); | |
2229 | mode = targetm.addr_space.address_mode (as); | |
2230 | } | |
805e22b2 | 2231 | |
98155838 | 2232 | TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode)); |
805e22b2 | 2233 | TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode)); |
78a8ed03 | 2234 | TYPE_UNSIGNED (type) = 1; |
98155838 | 2235 | TYPE_PRECISION (type) = GET_MODE_BITSIZE (mode); |
f1986931 | 2236 | } |
f2cfea4a | 2237 | break; |
2238 | ||
2239 | case ARRAY_TYPE: | |
2240 | { | |
19cb6b50 | 2241 | tree index = TYPE_DOMAIN (type); |
2242 | tree element = TREE_TYPE (type); | |
f2cfea4a | 2243 | |
2244 | build_pointer_type (element); | |
2245 | ||
2246 | /* We need to know both bounds in order to compute the size. */ | |
2247 | if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index) | |
2248 | && TYPE_SIZE (element)) | |
2249 | { | |
bc97b18f | 2250 | tree ub = TYPE_MAX_VALUE (index); |
2251 | tree lb = TYPE_MIN_VALUE (index); | |
f00a8c41 | 2252 | tree element_size = TYPE_SIZE (element); |
41e112e6 | 2253 | tree length; |
2254 | ||
820fcceb | 2255 | /* Make sure that an array of zero-sized element is zero-sized |
2256 | regardless of its extent. */ | |
2257 | if (integer_zerop (element_size)) | |
2258 | length = size_zero_node; | |
2259 | ||
7542c3b4 | 2260 | /* The computation should happen in the original signedness so |
2261 | that (possibly) negative values are handled appropriately |
2262 | when determining overflow. */ | |
820fcceb | 2263 | else |
85d86b55 | 2264 | { |
2265 | /* ??? When it is obvious that the range is signed | |
2266 | represent it using ssizetype. */ | |
2267 | if (TREE_CODE (lb) == INTEGER_CST | |
2268 | && TREE_CODE (ub) == INTEGER_CST | |
2269 | && TYPE_UNSIGNED (TREE_TYPE (lb)) | |
2270 | && tree_int_cst_lt (ub, lb)) | |
2271 | { | |
796b6678 | 2272 | lb = wide_int_to_tree (ssizetype, |
5de9d3ed | 2273 | offset_int::from (lb, SIGNED)); |
796b6678 | 2274 | ub = wide_int_to_tree (ssizetype, |
5de9d3ed | 2275 | offset_int::from (ub, SIGNED)); |
85d86b55 | 2276 | } |
2277 | length | |
2278 | = fold_convert (sizetype, | |
2279 | size_binop (PLUS_EXPR, | |
2280 | build_int_cst (TREE_TYPE (lb), 1), | |
2281 | size_binop (MINUS_EXPR, ub, lb))); | |
2282 | } | |
2283 | ||
97658fc9 | 2284 | /* ??? We have no way to distinguish a null-sized array from an |
2285 | array spanning the whole sizetype range, so we arbitrarily | |
2286 | decide that [0, -1] is the only valid representation. */ | |
85d86b55 | 2287 | if (integer_zerop (length) |
97658fc9 | 2288 | && TREE_OVERFLOW (length) |
2289 | && integer_zerop (lb)) | |
85d86b55 | 2290 | length = size_zero_node; |
f2cfea4a | 2291 | |
902de8ed | 2292 | TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size, |
7bd4091f | 2293 | fold_convert (bitsizetype, |
5d7ed6c7 | 2294 | length)); |
cec6c892 | 2295 | |
f00a8c41 | 2296 | /* If we know the size of the element, calculate the total size |
2297 | directly, rather than do some division thing below. This | |
2298 | optimization helps Fortran assumed-size arrays (where the | |
2299 | size of the array is determined at runtime) substantially. */ | |
2300 | if (TYPE_SIZE_UNIT (element)) | |
083a2b5e | 2301 | TYPE_SIZE_UNIT (type) |
2302 | = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length); | |
f2cfea4a | 2303 | } |
2304 | ||
2305 | /* Now round the alignment and size, | |
2306 | using machine-dependent criteria if any. */ | |
2307 | ||
2308 | #ifdef ROUND_TYPE_ALIGN | |
2309 | TYPE_ALIGN (type) | |
2310 | = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT); | |
2311 | #else | |
2312 | TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT); | |
2313 | #endif | |
e6475517 | 2314 | TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element); |
342ad2d6 | 2315 | SET_TYPE_MODE (type, BLKmode); |
f2cfea4a | 2316 | if (TYPE_SIZE (type) != 0 |
f91ed644 | 2317 | && ! targetm.member_type_forces_blk (type, VOIDmode) |
f2cfea4a | 2318 | /* BLKmode elements force BLKmode aggregate; |
2319 | else extract/store fields may lose. */ | |
2320 | && (TYPE_MODE (TREE_TYPE (type)) != BLKmode | |
2321 | || TYPE_NO_FORCE_BLK (TREE_TYPE (type)))) | |
2322 | { | |
13d3ceb9 | 2323 | SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type), |
2324 | TYPE_SIZE (type))); | |
0fc6aef1 | 2325 | if (TYPE_MODE (type) != BLKmode |
2326 | && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT | |
a2ee4f78 | 2327 | && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type))) |
f2cfea4a | 2328 | { |
2329 | TYPE_NO_FORCE_BLK (type) = 1; | |
342ad2d6 | 2330 | SET_TYPE_MODE (type, BLKmode); |
f2cfea4a | 2331 | } |
f2cfea4a | 2332 | } |
e1b062ae | 2333 | /* When the element size is constant, check that it is at least as |
2334 | large as the element alignment. */ | |
b3bb0d2d | 2335 | if (TYPE_SIZE_UNIT (element) |
2336 | && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST | |
e1b062ae | 2337 | /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than |
2338 | TYPE_ALIGN_UNIT. */ | |
f96bd2bf | 2339 | && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element)) |
b3bb0d2d | 2340 | && !integer_zerop (TYPE_SIZE_UNIT (element)) |
2341 | && compare_tree_int (TYPE_SIZE_UNIT (element), | |
2342 | TYPE_ALIGN_UNIT (element)) < 0) | |
2343 | error ("alignment of array elements is greater than element size"); | |
f2cfea4a | 2344 | break; |
2345 | } | |
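    /* For illustration only (assuming a 32-bit int): for

         int a[10];

       the domain is [0, 9], so length is (9 - 0) + 1 = 10 and the
       ARRAY_TYPE gets TYPE_SIZE = 10 * 32 = 320 bits with
       TYPE_SIZE_UNIT = 40 bytes.  */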
2346 | ||
2347 | case RECORD_TYPE: | |
83675f44 | 2348 | case UNION_TYPE: |
2349 | case QUAL_UNION_TYPE: | |
99f4e085 | 2350 | { |
2351 | tree field; | |
2352 | record_layout_info rli; | |
2353 | ||
2354 | /* Initialize the layout information. */ | |
02e7a332 | 2355 | rli = start_record_layout (type); |
2356 | ||
83675f44 | 2357 | /* If this is a QUAL_UNION_TYPE, we want to process the fields |
2358 | in the reverse order in building the COND_EXPR that denotes | |
2359 | its size. We reverse them again later. */ | |
2360 | if (TREE_CODE (type) == QUAL_UNION_TYPE) | |
2361 | TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type)); | |
02e7a332 | 2362 | |
2363 | /* Place all the fields. */ | |
1767a056 | 2364 | for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) |
02e7a332 | 2365 | place_field (rli, field); |
2366 | ||
83675f44 | 2367 | if (TREE_CODE (type) == QUAL_UNION_TYPE) |
2368 | TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type)); | |
02e7a332 | 2369 | |
99f4e085 | 2370 | /* Finish laying out the record. */ |
23ed74d8 | 2371 | finish_record_layout (rli, /*free_p=*/true); |
99f4e085 | 2372 | } |
f2cfea4a | 2373 | break; |
2374 | ||
f2cfea4a | 2375 | default: |
04e579b6 | 2376 | gcc_unreachable (); |
fe352cf1 | 2377 | } |
f2cfea4a | 2378 | |
99f4e085 | 2379 | /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For |
83675f44 | 2380 | records and unions, finish_record_layout already called this |
2381 | function. */ | |
40734805 | 2382 | if (TREE_CODE (type) != RECORD_TYPE |
83675f44 | 2383 | && TREE_CODE (type) != UNION_TYPE |
2384 | && TREE_CODE (type) != QUAL_UNION_TYPE) | |
99f4e085 | 2385 | finalize_type_size (type); |
f2cfea4a | 2386 | |
b35a8f48 | 2387 | /* We should never see alias sets on incomplete aggregates. And we |
2388 | should not call layout_type on aggregates that are already complete. */ |
2389 | if (AGGREGATE_TYPE_P (type)) | |
2390 | gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type)); | |
f2cfea4a | 2391 | } |
342ad2d6 | 2392 | |
392dee1e | 2393 | /* Return the least alignment required for type TYPE. */ |
2394 | ||
2395 | unsigned int | |
2396 | min_align_of_type (tree type) | |
2397 | { | |
2398 | unsigned int align = TYPE_ALIGN (type); | |
2399 | align = MIN (align, BIGGEST_ALIGNMENT); | |
2400 | #ifdef BIGGEST_FIELD_ALIGNMENT | |
2401 | align = MIN (align, BIGGEST_FIELD_ALIGNMENT); | |
2402 | #endif | |
2403 | unsigned int field_align = align; | |
2404 | #ifdef ADJUST_FIELD_ALIGN | |
2405 | tree field = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE, | |
2406 | type); | |
2407 | field_align = ADJUST_FIELD_ALIGN (field, field_align); | |
2408 | ggc_free (field); | |
2409 | #endif | |
2410 | align = MIN (align, field_align); | |
2411 | return align / BITS_PER_UNIT; | |
2412 | } | |
2413 | ||
342ad2d6 | 2414 | /* Vector types need to re-check the target flags each time we report |
2415 | the machine mode. We need to do this because attribute target can | |
2416 | change the result of vector_mode_supported_p and have_regs_of_mode | |
2417 | on a per-function basis. Thus the TYPE_MODE of a VECTOR_TYPE can | |
2418 | change on a per-function basis. */ | |
48e1416a | 2419 | /* ??? Possibly a better solution is to run through all the types |
342ad2d6 | 2420 | referenced by a function and re-compute the TYPE_MODE once, rather |
2421 | than make the TYPE_MODE macro call a function. */ | |
2422 | ||
2423 | enum machine_mode | |
2424 | vector_type_mode (const_tree t) | |
2425 | { | |
2426 | enum machine_mode mode; | |
2427 | ||
2428 | gcc_assert (TREE_CODE (t) == VECTOR_TYPE); | |
2429 | ||
8f2eb9e1 | 2430 | mode = t->type_common.mode; |
342ad2d6 | 2431 | if (VECTOR_MODE_P (mode) |
2432 | && (!targetm.vector_mode_supported_p (mode) | |
2433 | || !have_regs_of_mode[mode])) | |
2434 | { | |
8f2eb9e1 | 2435 | enum machine_mode innermode = TREE_TYPE (t)->type_common.mode; |
342ad2d6 | 2436 | |
2437 | /* For integers, try mapping it to a same-sized scalar mode. */ | |
2438 | if (GET_MODE_CLASS (innermode) == MODE_INT) | |
2439 | { | |
2440 | mode = mode_for_size (TYPE_VECTOR_SUBPARTS (t) | |
2441 | * GET_MODE_BITSIZE (innermode), MODE_INT, 0); | |
2442 | ||
2443 | if (mode != VOIDmode && have_regs_of_mode[mode]) | |
2444 | return mode; | |
2445 | } | |
2446 | ||
2447 | return BLKmode; | |
2448 | } | |
2449 | ||
2450 | return mode; | |
2451 | } | |
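/* For illustration only: a vector type with four 32-bit integer lanes
   whose V4SImode is unsupported for the current function (for example
   because a target attribute disabled the vector unit) is reported here
   as the same-sized scalar TImode when that mode has registers, and as
   BLKmode otherwise.  */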
f2cfea4a | 2452 | \f |
2453 | /* Create and return a type for signed integers of PRECISION bits. */ | |
2454 | ||
2455 | tree | |
60b8c5b3 | 2456 | make_signed_type (int precision) |
f2cfea4a | 2457 | { |
19cb6b50 | 2458 | tree type = make_node (INTEGER_TYPE); |
f2cfea4a | 2459 | |
2460 | TYPE_PRECISION (type) = precision; | |
2461 | ||
902de8ed | 2462 | fixup_signed_type (type); |
f2cfea4a | 2463 | return type; |
2464 | } | |
2465 | ||
2466 | /* Create and return a type for unsigned integers of PRECISION bits. */ | |
2467 | ||
2468 | tree | |
60b8c5b3 | 2469 | make_unsigned_type (int precision) |
f2cfea4a | 2470 | { |
19cb6b50 | 2471 | tree type = make_node (INTEGER_TYPE); |
f2cfea4a | 2472 | |
2473 | TYPE_PRECISION (type) = precision; | |
2474 | ||
f2cfea4a | 2475 | fixup_unsigned_type (type); |
2476 | return type; | |
2477 | } | |
902de8ed | 2478 | \f |
06f0b99c | 2479 | /* Create and return a type for fract of PRECISION bits, UNSIGNEDP, |
2480 | and SATP. */ | |
2481 | ||
2482 | tree | |
2483 | make_fract_type (int precision, int unsignedp, int satp) | |
2484 | { | |
2485 | tree type = make_node (FIXED_POINT_TYPE); | |
2486 | ||
2487 | TYPE_PRECISION (type) = precision; | |
2488 | ||
2489 | if (satp) | |
2490 | TYPE_SATURATING (type) = 1; | |
2491 | ||
2492 | /* Lay out the type: set its alignment, size, etc. */ | |
2493 | if (unsignedp) | |
2494 | { | |
2495 | TYPE_UNSIGNED (type) = 1; | |
342ad2d6 | 2496 | SET_TYPE_MODE (type, mode_for_size (precision, MODE_UFRACT, 0)); |
06f0b99c | 2497 | } |
2498 | else | |
342ad2d6 | 2499 | SET_TYPE_MODE (type, mode_for_size (precision, MODE_FRACT, 0)); |
06f0b99c | 2500 | layout_type (type); |
2501 | ||
2502 | return type; | |
2503 | } | |
2504 | ||
2505 | /* Create and return a type for accum of PRECISION bits, UNSIGNEDP, | |
2506 | and SATP. */ | |
2507 | ||
2508 | tree | |
2509 | make_accum_type (int precision, int unsignedp, int satp) | |
2510 | { | |
2511 | tree type = make_node (FIXED_POINT_TYPE); | |
2512 | ||
2513 | TYPE_PRECISION (type) = precision; | |
2514 | ||
2515 | if (satp) | |
2516 | TYPE_SATURATING (type) = 1; | |
2517 | ||
2518 | /* Lay out the type: set its alignment, size, etc. */ | |
2519 | if (unsignedp) | |
2520 | { | |
2521 | TYPE_UNSIGNED (type) = 1; | |
342ad2d6 | 2522 | SET_TYPE_MODE (type, mode_for_size (precision, MODE_UACCUM, 0)); |
06f0b99c | 2523 | } |
2524 | else | |
342ad2d6 | 2525 | SET_TYPE_MODE (type, mode_for_size (precision, MODE_ACCUM, 0)); |
06f0b99c | 2526 | layout_type (type); |
2527 | ||
2528 | return type; | |
2529 | } | |
2530 | ||
7907db97 | 2531 | /* Initialize sizetypes so layout_type can use them. */ |
902de8ed | 2532 | |
2533 | void | |
ad086ed4 | 2534 | initialize_sizetypes (void) |
902de8ed | 2535 | { |
7907db97 | 2536 | int precision, bprecision; |
2537 | ||
2538 | /* Get sizetypes precision from the SIZE_TYPE target macro. */ | |
748e5d45 | 2539 | if (strcmp (SIZETYPE, "unsigned int") == 0) |
7907db97 | 2540 | precision = INT_TYPE_SIZE; |
748e5d45 | 2541 | else if (strcmp (SIZETYPE, "long unsigned int") == 0) |
7907db97 | 2542 | precision = LONG_TYPE_SIZE; |
748e5d45 | 2543 | else if (strcmp (SIZETYPE, "long long unsigned int") == 0) |
7907db97 | 2544 | precision = LONG_LONG_TYPE_SIZE; |
748e5d45 | 2545 | else if (strcmp (SIZETYPE, "short unsigned int") == 0) |
621fad71 | 2546 | precision = SHORT_TYPE_SIZE; |
7907db97 | 2547 | else |
2548 | gcc_unreachable (); | |
7bd4091f | 2549 | |
7907db97 | 2550 | bprecision |
2551 | = MIN (precision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE); | |
2552 | bprecision | |
2553 | = GET_MODE_PRECISION (smallest_mode_for_size (bprecision, MODE_INT)); | |
24cd46a7 | 2554 | if (bprecision > HOST_BITS_PER_DOUBLE_INT) |
2555 | bprecision = HOST_BITS_PER_DOUBLE_INT; | |
7907db97 | 2556 | |
2557 | /* Create stubs for sizetype and bitsizetype so we can create constants. */ | |
2558 | sizetype = make_node (INTEGER_TYPE); | |
f1444979 | 2559 | TYPE_NAME (sizetype) = get_identifier ("sizetype"); |
7907db97 | 2560 | TYPE_PRECISION (sizetype) = precision; |
2561 | TYPE_UNSIGNED (sizetype) = 1; | |
7907db97 | 2562 | bitsizetype = make_node (INTEGER_TYPE); |
2563 | TYPE_NAME (bitsizetype) = get_identifier ("bitsizetype"); | |
2564 | TYPE_PRECISION (bitsizetype) = bprecision; | |
2565 | TYPE_UNSIGNED (bitsizetype) = 1; | |
7907db97 | 2566 | |
2567 | /* Now layout both types manually. */ | |
2568 | SET_TYPE_MODE (sizetype, smallest_mode_for_size (precision, MODE_INT)); | |
2569 | TYPE_ALIGN (sizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (sizetype)); | |
2570 | TYPE_SIZE (sizetype) = bitsize_int (precision); | |
2571 | TYPE_SIZE_UNIT (sizetype) = size_int (GET_MODE_SIZE (TYPE_MODE (sizetype))); | |
e913b5cd | 2572 | set_min_and_max_values_for_integral_type (sizetype, precision, UNSIGNED); |
7907db97 | 2573 | |
2574 | SET_TYPE_MODE (bitsizetype, smallest_mode_for_size (bprecision, MODE_INT)); | |
2575 | TYPE_ALIGN (bitsizetype) = GET_MODE_ALIGNMENT (TYPE_MODE (bitsizetype)); | |
2576 | TYPE_SIZE (bitsizetype) = bitsize_int (bprecision); | |
2577 | TYPE_SIZE_UNIT (bitsizetype) | |
2578 | = size_int (GET_MODE_SIZE (TYPE_MODE (bitsizetype))); | |
e913b5cd | 2579 | set_min_and_max_values_for_integral_type (bitsizetype, bprecision, UNSIGNED); |
e345fa3a | 2580 | |
ad086ed4 | 2581 | /* Create the signed variants of *sizetype. */ |
7907db97 | 2582 | ssizetype = make_signed_type (TYPE_PRECISION (sizetype)); |
f1444979 | 2583 | TYPE_NAME (ssizetype) = get_identifier ("ssizetype"); |
7907db97 | 2584 | sbitsizetype = make_signed_type (TYPE_PRECISION (bitsizetype)); |
f1444979 | 2585 | TYPE_NAME (sbitsizetype) = get_identifier ("sbitsizetype"); |
902de8ed | 2586 | } |
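
/* Editorial note, not part of the original sources: as a worked example,
   on a hypothetical LP64 target where SIZETYPE is "long unsigned int" and
   LONG_TYPE_SIZE is 64, PRECISION above is 64 and BPRECISION starts as
   MIN (64 + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE) = 68, which
   smallest_mode_for_size then rounds up to the precision of a 128-bit
   integer mode; the result is finally capped at HOST_BITS_PER_DOUBLE_INT.
   The concrete numbers are target-dependent assumptions, not guaranteed
   values.  */
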
2587 | \f | |
63bf54cf | 2588 | /* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE
2589 | or BOOLEAN_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE | |
ee1ab431 | 2590 | for TYPE, based on the PRECISION and the signedness given by
2591 | SGN. PRECISION need not correspond to a width supported | |
2592 | natively by the hardware; for example, on a machine with 8-bit, | |
2593 | 16-bit, and 32-bit register modes, PRECISION might be 7, 23, or | |
2594 | 61. */ | |
2595 | ||
2596 | void | |
2597 | set_min_and_max_values_for_integral_type (tree type, | |
2598 | int precision, | |
e913b5cd | 2599 | signop sgn) |
ee1ab431 | 2600 | { |
0e3dfadd | 2601 | /* For bitfields with zero width we end up creating integer types |
2602 | with zero precision. Don't assign any minimum/maximum values | |
2603 | to those types; they don't have any valid value. */ | |
2604 | if (precision < 1) | |
2605 | return; | |
2606 | ||
796b6678 | 2607 | TYPE_MIN_VALUE (type) |
2608 | = wide_int_to_tree (type, wi::min_value (precision, sgn)); | |
2609 | TYPE_MAX_VALUE (type) | |
2610 | = wide_int_to_tree (type, wi::max_value (precision, sgn)); | |
ee1ab431 | 2611 | } |
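
/* Editorial example, not part of the original sources: for a hypothetical
   7-bit integer type, the calls above give TYPE_MIN_VALUE/TYPE_MAX_VALUE
   of -64 and 63 when SGN is SIGNED, and 0 and 127 when SGN is UNSIGNED,
   regardless of whether the target has a native 7-bit mode.  */
#if 0
static void
example_seven_bit_bounds (void)
{
  tree t = make_node (INTEGER_TYPE);
  TYPE_PRECISION (t) = 7;
  set_min_and_max_values_for_integral_type (t, 7, SIGNED);
  /* TYPE_MIN_VALUE (t) is -64 and TYPE_MAX_VALUE (t) is 63.  */
}
#endif
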
2612 | ||
6d85d3bb | 2613 | /* Set the extreme values of TYPE based on its precision in bits, |
6bb1bdc7 | 2614 | then lay it out. Used when make_signed_type won't do |
6d85d3bb | 2615 | because the tree code is not INTEGER_TYPE. |
2616 | E.g. for Pascal, when the -fsigned-char option is given. */ | |
2617 | ||
2618 | void | |
60b8c5b3 | 2619 | fixup_signed_type (tree type) |
6d85d3bb | 2620 | { |
19cb6b50 | 2621 | int precision = TYPE_PRECISION (type); |
6d85d3bb | 2622 | |
e913b5cd | 2623 | set_min_and_max_values_for_integral_type (type, precision, SIGNED); |
6d85d3bb | 2624 | |
2625 | /* Lay out the type: set its alignment, size, etc. */ | |
6d85d3bb | 2626 | layout_type (type); |
2627 | } | |
2628 | ||
f2cfea4a | 2629 | /* Set the extreme values of TYPE based on its precision in bits, |
6bb1bdc7 | 2630 | then lay it out. This is used both in `make_unsigned_type' |
f2cfea4a | 2631 | and for enumeral types. */ |
2632 | ||
2633 | void | |
60b8c5b3 | 2634 | fixup_unsigned_type (tree type) |
f2cfea4a | 2635 | { |
19cb6b50 | 2636 | int precision = TYPE_PRECISION (type); |
f2cfea4a | 2637 | |
00b76131 | 2638 | TYPE_UNSIGNED (type) = 1; |
9e7454d0 | 2639 | |
e913b5cd | 2640 | set_min_and_max_values_for_integral_type (type, precision, UNSIGNED); |
f2cfea4a | 2641 | |
2642 | /* Lay out the type: set its alignment, size, etc. */ | |
f2cfea4a | 2643 | layout_type (type); |
2644 | } | |
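
/* Editorial sketch, not part of the original sources: a front end that has
   already chosen a precision for an enumeration might finish the type with
   the helper above; the 8-bit precision is hypothetical.  fixup_signed_type
   would be used instead for a signed non-INTEGER_TYPE node.  */
#if 0
static void
example_finish_enum (tree enum_type)
{
  /* Enumerators 0..200 fit in 8 unsigned bits.  */
  TYPE_PRECISION (enum_type) = 8;
  fixup_unsigned_type (enum_type); /* Sets bounds 0..255 and lays it out.  */
}
#endif
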
2645 | \f | |
0a1f5755 | 2646 | /* Construct an iterator for a bitfield that spans BITSIZE bits, |
2647 | starting at BITPOS. | |
2648 | ||
2649 | BITREGION_START is the bit position of the first bit in this | |
2650 | sequence of bit fields. BITREGION_END is the last bit in this | |
2651 | sequence. If these two fields are non-zero, we should restrict the | |
2652 | memory access to that range. Otherwise, we are allowed to touch | |
2653 | any adjacent non bit-fields. | |
2654 | ||
2655 | ALIGN is the alignment of the underlying object in bits. | |
2656 | VOLATILEP says whether the bitfield is volatile. */ | |
2657 | ||
2658 | bit_field_mode_iterator | |
2659 | ::bit_field_mode_iterator (HOST_WIDE_INT bitsize, HOST_WIDE_INT bitpos, | |
2660 | HOST_WIDE_INT bitregion_start, | |
2661 | HOST_WIDE_INT bitregion_end, | |
2662 | unsigned int align, bool volatilep) | |
ae84f584 | 2663 | : m_mode (GET_CLASS_NARROWEST_MODE (MODE_INT)), m_bitsize (bitsize), |
2664 | m_bitpos (bitpos), m_bitregion_start (bitregion_start), | |
2665 | m_bitregion_end (bitregion_end), m_align (align), | |
2666 | m_volatilep (volatilep), m_count (0) | |
0a1f5755 | 2667 | { |
ae84f584 | 2668 | if (!m_bitregion_end) |
4ed6cf77 | 2669 | { |
392630d4 | 2670 | /* We can assume that any aligned chunk of ALIGN bits that overlaps |
2671 | the bitfield is mapped and won't trap, provided that ALIGN isn't | |
2672 | too large. The cap is the biggest required alignment for data, | |
2673 | or at least the word size. Also force the region to cover at least one such chunk. */ | |
2674 | unsigned HOST_WIDE_INT units | |
2675 | = MIN (align, MAX (BIGGEST_ALIGNMENT, BITS_PER_WORD)); | |
2676 | if (bitsize <= 0) | |
2677 | bitsize = 1; | |
ae84f584 | 2678 | m_bitregion_end = bitpos + bitsize + units - 1; |
2679 | m_bitregion_end -= m_bitregion_end % units + 1; | |
4ed6cf77 | 2680 | } |
0a1f5755 | 2681 | } |
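
/* Editorial example, not part of the original sources: with hypothetical
   inputs BITSIZE = 3, BITPOS = 17, ALIGN = 32 and no explicit bit region
   (and assuming the word size is at least 32 bits), UNITS is 32, so
   M_BITREGION_END becomes 17 + 3 + 32 - 1 = 51 and then
   51 - (51 % 32 + 1) = 31; i.e. the default region is the aligned 32-bit
   chunk covering bits 0..31, which contains the whole field.  */
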
2682 | ||
2683 | /* Calls to this function return successively larger modes that can be used | |
2684 | to represent the bitfield. Return true if another bitfield mode is | |
2685 | available, storing it in *OUT_MODE if so. */ | |
2686 | ||
2687 | bool | |
2688 | bit_field_mode_iterator::next_mode (enum machine_mode *out_mode) | |
2689 | { | |
ae84f584 | 2690 | for (; m_mode != VOIDmode; m_mode = GET_MODE_WIDER_MODE (m_mode)) |
0a1f5755 | 2691 | { |
ae84f584 | 2692 | unsigned int unit = GET_MODE_BITSIZE (m_mode); |
0a1f5755 | 2693 | |
2694 | /* Skip modes that don't have full precision. */ | |
ae84f584 | 2695 | if (unit != GET_MODE_PRECISION (m_mode)) |
0a1f5755 | 2696 | continue; |
2697 | ||
0a1f5755 | 2698 | /* Stop if the mode is too wide to handle efficiently. */ |
2699 | if (unit > MAX_FIXED_MODE_SIZE) | |
2700 | break; | |
2701 | ||
2702 | /* Don't deliver more than one multiword mode; the smallest one | |
2703 | should be used. */ | |
ae84f584 | 2704 | if (m_count > 0 && unit > BITS_PER_WORD) |
0a1f5755 | 2705 | break; |
2706 | ||
efa6660d | 2707 | /* Skip modes that are too small. */ |
ae84f584 | 2708 | unsigned HOST_WIDE_INT substart = (unsigned HOST_WIDE_INT) m_bitpos % unit; |
2709 | unsigned HOST_WIDE_INT subend = substart + m_bitsize; | |
efa6660d | 2710 | if (subend > unit) |
2711 | continue; | |
2712 | ||
0a1f5755 | 2713 | /* Stop if the mode goes outside the bitregion. */ |
ae84f584 | 2714 | HOST_WIDE_INT start = m_bitpos - substart; |
2715 | if (m_bitregion_start && start < m_bitregion_start) | |
0a1f5755 | 2716 | break; |
efa6660d | 2717 | HOST_WIDE_INT end = start + unit; |
ae84f584 | 2718 | if (end > m_bitregion_end + 1) |
4ed6cf77 | 2719 | break; |
2720 | ||
2721 | /* Stop if the mode requires too much alignment. */ | |
ae84f584 | 2722 | if (GET_MODE_ALIGNMENT (m_mode) > m_align |
2723 | && SLOW_UNALIGNED_ACCESS (m_mode, m_align)) | |
0a1f5755 | 2724 | break; |
2725 | ||
ae84f584 | 2726 | *out_mode = m_mode; |
2727 | m_mode = GET_MODE_WIDER_MODE (m_mode); | |
2728 | m_count++; | |
0a1f5755 | 2729 | return true; |
2730 | } | |
2731 | return false; | |
2732 | } | |
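
/* Editorial sketch, not part of the original sources: typical use of the
   iterator, walking the acceptable modes from narrowest to widest.  The
   field geometry (an 8-bit field at bit 4 of a 32-bit-aligned object) is
   hypothetical.  */
#if 0
static void
example_iterate_bitfield_modes (void)
{
  bit_field_mode_iterator iter (8 /* bitsize */, 4 /* bitpos */,
				0 /* bitregion_start */,
				0 /* bitregion_end */,
				32 /* align */, false /* volatilep */);
  enum machine_mode mode;
  while (iter.next_mode (&mode))
    {
      /* MODE can hold bits 4..11 without leaving the inferred bit
	 region; the first mode returned is the narrowest usable one.  */
    }
}
#endif
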
2733 | ||
2734 | /* Return true if smaller modes are generally preferred for this kind | |
2735 | of bitfield. */ | |
2736 | ||
2737 | bool | |
2738 | bit_field_mode_iterator::prefer_smaller_modes () | |
2739 | { | |
ae84f584 | 2740 | return (m_volatilep |
0a1f5755 | 2741 | ? targetm.narrow_volatile_bitfield () |
2742 | : !SLOW_BYTE_ACCESS); | |
2743 | } | |
2744 | ||
f2cfea4a | 2745 | /* Find the best machine mode to use when referencing a bit field of length |
2746 | BITSIZE bits starting at BITPOS. | |
2747 | ||
4bb60ec7 | 2748 | BITREGION_START is the bit position of the first bit in this |
2749 | sequence of bit fields. BITREGION_END is the last bit in this | |
2750 | sequence. If these two fields are non-zero, we should restrict the | |
0a1f5755 | 2751 | memory access to that range. Otherwise, we are allowed to touch |
4bb60ec7 | 2752 | any adjacent non bit-fields. |
2753 | ||
f2cfea4a | 2754 | The underlying object is known to be aligned to a boundary of ALIGN bits. |
2755 | If LARGEST_MODE is not VOIDmode, it means that we should not use a mode | |
2756 | larger than LARGEST_MODE (usually SImode). | |
2757 | ||
5f458503 | 2758 | If no mode meets all these conditions, we return VOIDmode. |
7bd4091f | 2759 | |
5f458503 | 2760 | If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the |
2761 | smallest mode meeting these conditions. | |
2762 | ||
2763 | If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the | |
2764 | largest mode (but a mode no wider than UNITS_PER_WORD) that meets | |
2765 | all the conditions. | |
7bd4091f | 2766 | |
5f458503 | 2767 | If VOLATILEP is true the narrow_volatile_bitfield target hook is used to
2768 | decide which of the above modes should be used. */ | |
f2cfea4a | 2769 | |
2770 | enum machine_mode | |
4bb60ec7 | 2771 | get_best_mode (int bitsize, int bitpos, |
2772 | unsigned HOST_WIDE_INT bitregion_start, | |
2773 | unsigned HOST_WIDE_INT bitregion_end, | |
2774 | unsigned int align, | |
0a1f5755 | 2775 | enum machine_mode largest_mode, bool volatilep) |
f2cfea4a | 2776 | { |
0a1f5755 | 2777 | bit_field_mode_iterator iter (bitsize, bitpos, bitregion_start, |
2778 | bitregion_end, align, volatilep); | |
2779 | enum machine_mode widest_mode = VOIDmode; | |
f2cfea4a | 2780 | enum machine_mode mode; |
0a1f5755 | 2781 | while (iter.next_mode (&mode) |
06bedae0 | 2782 | /* ??? For historical reasons, reject modes that would normally |
2783 | receive greater alignment, even if unaligned accesses are | |
2784 | acceptable. This has both advantages and disadvantages. | |
4ed6cf77 | 2785 | Removing this check means that something like: |
2786 | ||
2787 | struct s { unsigned int x; unsigned int y; }; | |
2788 | int f (struct s *s) { return s->x == 0 && s->y == 0; } | |
2789 | ||
2790 | can be implemented using a single load and compare on | |
2791 | 64-bit machines that have no alignment restrictions. | |
2792 | For example, on powerpc64-linux-gnu, we would generate: | |
2793 | ||
2794 | ld 3,0(3) | |
2795 | cntlzd 3,3 | |
2796 | srdi 3,3,6 | |
2797 | blr | |
2798 | ||
2799 | rather than: | |
2800 | ||
2801 | lwz 9,0(3) | |
2802 | cmpwi 7,9,0 | |
2803 | bne 7,.L3 | |
2804 | lwz 3,4(3) | |
2805 | cntlzw 3,3 | |
2806 | srwi 3,3,5 | |
2807 | extsw 3,3 | |
2808 | blr | |
2809 | .p2align 4,,15 | |
2810 | .L3: | |
2811 | li 3,0 | |
2812 | blr | |
2813 | ||
2814 | However, accessing more than one field can make life harder | |
2815 | for the gimple optimizers. For example, gcc.dg/vect/bb-slp-5.c | |
2816 | has a series of unsigned short copies followed by a series of | |
2817 | unsigned short comparisons. With this check, both the copies | |
2818 | and comparisons remain 16-bit accesses and FRE is able | |
2819 | to eliminate the latter. Without the check, the comparisons | |
2820 | can be done using 2 64-bit operations, which FRE isn't able | |
2821 | to handle in the same way. | |
2822 | ||
2823 | Either way, it would probably be worth disabling this check | |
2824 | during expand. One particular example where removing the | |
2825 | check would help is the get_best_mode call in store_bit_field. | |
2826 | If we are given a memory bitregion of 128 bits that is aligned | |
2827 | to a 64-bit boundary, and the bitfield we want to modify is | |
2828 | in the second half of the bitregion, this check causes | |
2829 | store_bit_field to turn the memory into a 64-bit reference | |
2830 | to the _first_ half of the region. We later use | |
2831 | adjust_bitfield_address to get a reference to the correct half, | |
2832 | but doing so looks to adjust_bitfield_address as though we are | |
2833 | moving past the end of the original object, so it drops the | |
2834 | associated MEM_EXPR and MEM_OFFSET. Removing the check | |
2835 | causes store_bit_field to keep a 128-bit memory reference, | |
2836 | so that the final bitfield reference still has a MEM_EXPR | |
2837 | and MEM_OFFSET. */ | |
06bedae0 | 2838 | && GET_MODE_ALIGNMENT (mode) <= align |
0a1f5755 | 2839 | && (largest_mode == VOIDmode |
2840 | || GET_MODE_SIZE (mode) <= GET_MODE_SIZE (largest_mode))) | |
f2cfea4a | 2841 | { |
0a1f5755 | 2842 | widest_mode = mode; |
2843 | if (iter.prefer_smaller_modes ()) | |
f2cfea4a | 2844 | break; |
2845 | } | |
0a1f5755 | 2846 | return widest_mode; |
f2cfea4a | 2847 | } |
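
/* Editorial sketch, not part of the original sources: a minimal call asking
   for the best mode to access a 9-bit field at bit 3 of a 32-bit-aligned,
   non-volatile object, with no enclosing bit region and no mode-size cap.
   All of the numbers are hypothetical.  */
#if 0
static enum machine_mode
example_get_best_mode (void)
{
  return get_best_mode (9 /* bitsize */, 3 /* bitpos */,
			0 /* bitregion_start */, 0 /* bitregion_end */,
			32 /* align */, VOIDmode /* largest_mode */,
			false /* volatilep */);
}
#endif
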
521dd524 | 2848 | |
f9cce2dc | 2849 | /* Gets minimal and maximal values for MODE (signed or unsigned depending on |
a6629703 | 2850 | SIGN). The returned constants are made to be usable in TARGET_MODE. */ |
f9cce2dc | 2851 | |
2852 | void | |
a6629703 | 2853 | get_mode_bounds (enum machine_mode mode, int sign, |
2854 | enum machine_mode target_mode, | |
2855 | rtx *mmin, rtx *mmax) | |
f9cce2dc | 2856 | { |
44a9bb76 | 2857 | unsigned size = GET_MODE_PRECISION (mode); |
a6629703 | 2858 | unsigned HOST_WIDE_INT min_val, max_val; |
f9cce2dc | 2859 | |
04e579b6 | 2860 | gcc_assert (size <= HOST_BITS_PER_WIDE_INT); |
f9cce2dc | 2861 | |
396020c7 | 2862 | /* Special case BImode, which has values 0 and STORE_FLAG_VALUE. */ |
2863 | if (mode == BImode) | |
2864 | { | |
2865 | if (STORE_FLAG_VALUE < 0) | |
2866 | { | |
2867 | min_val = STORE_FLAG_VALUE; | |
2868 | max_val = 0; | |
2869 | } | |
2870 | else | |
2871 | { | |
2872 | min_val = 0; | |
2873 | max_val = STORE_FLAG_VALUE; | |
2874 | } | |
2875 | } | |
2876 | else if (sign) | |
f9cce2dc | 2877 | { |
a6629703 | 2878 | min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1)); |
2879 | max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1; | |
f9cce2dc | 2880 | } |
2881 | else | |
2882 | { | |
a6629703 | 2883 | min_val = 0; |
2884 | max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1; | |
f9cce2dc | 2885 | } |
a6629703 | 2886 | |
69e41517 | 2887 | *mmin = gen_int_mode (min_val, target_mode); |
2888 | *mmax = gen_int_mode (max_val, target_mode); | |
f9cce2dc | 2889 | } |
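
/* Editorial example, not part of the original sources: for an 8-bit-wide
   integer mode such as QImode, the code above yields bounds of -128 and
   127 when SIGN is nonzero and 0 and 255 otherwise, both returned as
   constants in TARGET_MODE; BImode instead gets the bounds 0 and
   STORE_FLAG_VALUE.  */
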
2890 | ||
1f3233d1 | 2891 | #include "gt-stor-layout.h" |