* stor-layout.c (variable_size): Do not issue errors.
1 /* C-compiler utilities for types and variables storage layout
2    Copyright (C) 1987, 1988, 1992, 1993, 1994, 1995, 1996, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4 Free Software Foundation, Inc.
5
6 This file is part of GCC.
7
8 GCC is free software; you can redistribute it and/or modify it under
9 the terms of the GNU General Public License as published by the Free
10 Software Foundation; either version 3, or (at your option) any later
11 version.
12
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
21
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "tm.h"
27 #include "tree.h"
28 #include "rtl.h"
29 #include "tm_p.h"
30 #include "flags.h"
31 #include "function.h"
32 #include "expr.h"
33 #include "output.h"
34 #include "diagnostic-core.h"
35 #include "ggc.h"
36 #include "target.h"
37 #include "langhooks.h"
38 #include "regs.h"
39 #include "params.h"
40 #include "cgraph.h"
41 #include "tree-inline.h"
42 #include "tree-dump.h"
43 #include "gimple.h"
44
45 /* Data type for the expressions representing sizes of data types.
46 It is the first integer type laid out. */
47 tree sizetype_tab[(int) TYPE_KIND_LAST];
48
49 /* If nonzero, this is an upper limit on alignment of structure fields.
50 The value is measured in bits. */
51 unsigned int maximum_field_alignment = TARGET_DEFAULT_PACK_STRUCT * BITS_PER_UNIT;
52
53 /* Nonzero if all REFERENCE_TYPEs are internal and hence should be allocated
54 in the address spaces' address_mode, not pointer_mode. Set only by
55 internal_reference_types called only by a front end. */
56 static int reference_types_internal = 0;
57
58 static tree self_referential_size (tree);
59 static void finalize_record_size (record_layout_info);
60 static void finalize_type_size (tree);
61 static void place_union_field (record_layout_info, tree);
62 #if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
63 static int excess_unit_span (HOST_WIDE_INT, HOST_WIDE_INT, HOST_WIDE_INT,
64 HOST_WIDE_INT, tree);
65 #endif
66 extern void debug_rli (record_layout_info);
67 \f
68 /* SAVE_EXPRs for sizes of types and decls, waiting to be expanded. */
69
70 static GTY(()) VEC(tree,gc) *pending_sizes;
71
72 /* Show that REFERENCE_TYPES are internal and should use address_mode.
73 Called only by front end. */
74
75 void
76 internal_reference_types (void)
77 {
78 reference_types_internal = 1;
79 }
80
81 /* Get a VEC of all the objects put on the pending sizes list. */
82
83 VEC(tree,gc) *
84 get_pending_sizes (void)
85 {
86 VEC(tree,gc) *chain = pending_sizes;
87
88 pending_sizes = 0;
89 return chain;
90 }
91
92 /* Add EXPR to the pending sizes list. */
93
94 void
95 put_pending_size (tree expr)
96 {
97 /* Strip any simple arithmetic from EXPR to see if it has an underlying
98 SAVE_EXPR. */
99 expr = skip_simple_arithmetic (expr);
100
101 if (TREE_CODE (expr) == SAVE_EXPR)
102 VEC_safe_push (tree, gc, pending_sizes, expr);
103 }
104
105 /* Put a chain of objects into the pending sizes list, which must be
106 empty. */
107
108 void
109 put_pending_sizes (VEC(tree,gc) *chain)
110 {
111 gcc_assert (!pending_sizes);
112 pending_sizes = chain;
113 }
114
115 /* Given a size SIZE that may not be a constant, return a SAVE_EXPR
116 to serve as the actual size-expression for a type or decl. */
117
118 tree
119 variable_size (tree size)
120 {
121 tree save;
122
123 /* Obviously. */
124 if (TREE_CONSTANT (size))
125 return size;
126
127 /* If the size is self-referential, we can't make a SAVE_EXPR (see
128 save_expr for the rationale). But we can do something else. */
129 if (CONTAINS_PLACEHOLDER_P (size))
130 return self_referential_size (size);
131
132 /* If the language-processor is to take responsibility for variable-sized
133 items (e.g., languages which have elaboration procedures like Ada),
134 just return SIZE unchanged. */
135 if (lang_hooks.decls.global_bindings_p () < 0)
136 return size;
137
138 size = save_expr (size);
139
140 /* If an array with a variable number of elements is declared, and
141 the elements require destruction, we will emit a cleanup for the
142 array. That cleanup is run both on normal exit from the block
143 and in the exception-handler for the block. Normally, when code
144 is used in both ordinary code and in an exception handler it is
145 `unsaved', i.e., all SAVE_EXPRs are recalculated. However, we do
146 not wish to do that here; the array-size is the same in both
147 places. */
148 save = skip_simple_arithmetic (size);
149
150 if (cfun && cfun->dont_save_pending_sizes_p)
151 /* The front-end doesn't want us to keep a list of the expressions
152 that determine sizes for variable size objects. Trust it. */
153 return size;
154
155 put_pending_size (save);
156
157 return size;
158 }
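/* Example (illustrative): for a C99 VLA such as `char buf[n];', the
   front end computes a non-constant DECL_SIZE and filters it through
   this function, e.g.

     DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));

   so that `n' is captured in a SAVE_EXPR and evaluated exactly once,
   however many times the size expression is reused (layout_decl below
   does precisely this for nonconstant sizes).  */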
159
160 /* An array of functions used for self-referential size computation. */
161 static GTY(()) VEC (tree, gc) *size_functions;
162
163 /* Look inside EXPR into simple arithmetic operations involving constants.
164 Return the outermost non-arithmetic or non-constant node. */
165
166 static tree
167 skip_simple_constant_arithmetic (tree expr)
168 {
169 while (true)
170 {
171 if (UNARY_CLASS_P (expr))
172 expr = TREE_OPERAND (expr, 0);
173 else if (BINARY_CLASS_P (expr))
174 {
175 if (TREE_CONSTANT (TREE_OPERAND (expr, 1)))
176 expr = TREE_OPERAND (expr, 0);
177 else if (TREE_CONSTANT (TREE_OPERAND (expr, 0)))
178 expr = TREE_OPERAND (expr, 1);
179 else
180 break;
181 }
182 else
183 break;
184 }
185
186 return expr;
187 }
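/* Example (illustrative): given `(f () + 4) * 2', the loop above peels
   the multiply-by-constant and the add-of-constant and returns the
   inner CALL_EXPR.  self_referential_size below relies on this to
   recognize a size that is already just a size-function call wrapped
   in trivial constant arithmetic.  */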
188
189 /* Similar to copy_tree_r but do not copy component references involving
190 PLACEHOLDER_EXPRs. These nodes are spotted in find_placeholder_in_expr
191 and substituted in substitute_in_expr. */
192
193 static tree
194 copy_self_referential_tree_r (tree *tp, int *walk_subtrees, void *data)
195 {
196 enum tree_code code = TREE_CODE (*tp);
197
198 /* Stop at types, decls, constants like copy_tree_r. */
199 if (TREE_CODE_CLASS (code) == tcc_type
200 || TREE_CODE_CLASS (code) == tcc_declaration
201 || TREE_CODE_CLASS (code) == tcc_constant)
202 {
203 *walk_subtrees = 0;
204 return NULL_TREE;
205 }
206
207 /* This is the pattern built in ada/make_aligning_type. */
208 else if (code == ADDR_EXPR
209 && TREE_CODE (TREE_OPERAND (*tp, 0)) == PLACEHOLDER_EXPR)
210 {
211 *walk_subtrees = 0;
212 return NULL_TREE;
213 }
214
215 /* Default case: the component reference. */
216 else if (code == COMPONENT_REF)
217 {
218 tree inner;
219 for (inner = TREE_OPERAND (*tp, 0);
220 REFERENCE_CLASS_P (inner);
221 inner = TREE_OPERAND (inner, 0))
222 ;
223
224 if (TREE_CODE (inner) == PLACEHOLDER_EXPR)
225 {
226 *walk_subtrees = 0;
227 return NULL_TREE;
228 }
229 }
230
231 /* We're not supposed to have them in self-referential size trees
232 because we wouldn't properly control when they are evaluated.
233 However, not creating superfluous SAVE_EXPRs requires accurate
234 tracking of readonly-ness all the way down to here, which we
235 cannot always guarantee in practice. So punt in this case. */
236 else if (code == SAVE_EXPR)
237 return error_mark_node;
238
239 return copy_tree_r (tp, walk_subtrees, data);
240 }
241
242 /* Given a SIZE expression that is self-referential, return an equivalent
243 expression to serve as the actual size expression for a type. */
244
245 static tree
246 self_referential_size (tree size)
247 {
248 static unsigned HOST_WIDE_INT fnno = 0;
249 VEC (tree, heap) *self_refs = NULL;
250 tree param_type_list = NULL, param_decl_list = NULL;
251 tree t, ref, return_type, fntype, fnname, fndecl;
252 unsigned int i;
253 char buf[128];
254 VEC(tree,gc) *args = NULL;
255
256 /* Do not factor out simple operations. */
257 t = skip_simple_constant_arithmetic (size);
258 if (TREE_CODE (t) == CALL_EXPR)
259 return size;
260
261 /* Collect the list of self-references in the expression. */
262 find_placeholder_in_expr (size, &self_refs);
263 gcc_assert (VEC_length (tree, self_refs) > 0);
264
265 /* Obtain a private copy of the expression. */
266 t = size;
267 if (walk_tree (&t, copy_self_referential_tree_r, NULL, NULL) != NULL_TREE)
268 return size;
269 size = t;
270
271 /* Build the parameter and argument lists in parallel; also
272 substitute the former for the latter in the expression. */
273 args = VEC_alloc (tree, gc, VEC_length (tree, self_refs));
274 FOR_EACH_VEC_ELT (tree, self_refs, i, ref)
275 {
276 tree subst, param_name, param_type, param_decl;
277
278 if (DECL_P (ref))
279 {
280 /* We shouldn't have true variables here. */
281 gcc_assert (TREE_READONLY (ref));
282 subst = ref;
283 }
284 /* This is the pattern built in ada/make_aligning_type. */
285 else if (TREE_CODE (ref) == ADDR_EXPR)
286 subst = ref;
287 /* Default case: the component reference. */
288 else
289 subst = TREE_OPERAND (ref, 1);
290
291 sprintf (buf, "p%d", i);
292 param_name = get_identifier (buf);
293 param_type = TREE_TYPE (ref);
294 param_decl
295 = build_decl (input_location, PARM_DECL, param_name, param_type);
296 if (targetm.calls.promote_prototypes (NULL_TREE)
297 && INTEGRAL_TYPE_P (param_type)
298 && TYPE_PRECISION (param_type) < TYPE_PRECISION (integer_type_node))
299 DECL_ARG_TYPE (param_decl) = integer_type_node;
300 else
301 DECL_ARG_TYPE (param_decl) = param_type;
302 DECL_ARTIFICIAL (param_decl) = 1;
303 TREE_READONLY (param_decl) = 1;
304
305 size = substitute_in_expr (size, subst, param_decl);
306
307 param_type_list = tree_cons (NULL_TREE, param_type, param_type_list);
308 param_decl_list = chainon (param_decl, param_decl_list);
309 VEC_quick_push (tree, args, ref);
310 }
311
312 VEC_free (tree, heap, self_refs);
313
314 /* Append 'void' to indicate that the number of parameters is fixed. */
315 param_type_list = tree_cons (NULL_TREE, void_type_node, param_type_list);
316
317 /* The 3 lists have been created in reverse order. */
318 param_type_list = nreverse (param_type_list);
319 param_decl_list = nreverse (param_decl_list);
320
321 /* Build the function type. */
322 return_type = TREE_TYPE (size);
323 fntype = build_function_type (return_type, param_type_list);
324
325 /* Build the function declaration. */
326   sprintf (buf, "SZ" HOST_WIDE_INT_PRINT_UNSIGNED, fnno++);
327 fnname = get_file_function_name (buf);
328 fndecl = build_decl (input_location, FUNCTION_DECL, fnname, fntype);
329 for (t = param_decl_list; t; t = DECL_CHAIN (t))
330 DECL_CONTEXT (t) = fndecl;
331 DECL_ARGUMENTS (fndecl) = param_decl_list;
332 DECL_RESULT (fndecl)
333 = build_decl (input_location, RESULT_DECL, 0, return_type);
334 DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl;
335
336 /* The function has been created by the compiler and we don't
337 want to emit debug info for it. */
338 DECL_ARTIFICIAL (fndecl) = 1;
339 DECL_IGNORED_P (fndecl) = 1;
340
341 /* It is supposed to be "const" and never throw. */
342 TREE_READONLY (fndecl) = 1;
343 TREE_NOTHROW (fndecl) = 1;
344
345 /* We want it to be inlined when this is deemed profitable, as
346 well as discarded if every call has been integrated. */
347 DECL_DECLARED_INLINE_P (fndecl) = 1;
348
349 /* It is made up of a unique return statement. */
350 DECL_INITIAL (fndecl) = make_node (BLOCK);
351 BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl;
352 t = build2 (MODIFY_EXPR, return_type, DECL_RESULT (fndecl), size);
353 DECL_SAVED_TREE (fndecl) = build1 (RETURN_EXPR, void_type_node, t);
354 TREE_STATIC (fndecl) = 1;
355
356 /* Put it onto the list of size functions. */
357 VEC_safe_push (tree, gc, size_functions, fndecl);
358
359 /* Replace the original expression with a call to the size function. */
360 return build_call_expr_loc_vec (UNKNOWN_LOCATION, fndecl, args);
361 }
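/* Worked example (illustrative; the Ada-style pattern and the name SZ0
   are only for exposition): for a type whose size is
   `<PLACEHOLDER_EXPR>.n * 8', the code above builds a small constant
   function, roughly

     static <bitsizetype> SZ0 (<bitsizetype> p0) { return p0 * 8; }

   substitutes the parameter p0 for the self-referential component
   reference, and returns `SZ0 (<ref>.n)' as the new size expression.
   finalize_size_functions below gimplifies and emits these functions
   at the very end of compilation so the inliner can integrate them
   back at their call sites.  */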
362
363 /* Take, queue and compile all the size functions. It is essential that
364 the size functions be gimplified at the very end of the compilation
365 in order to guarantee transparent handling of self-referential sizes.
366 Otherwise the GENERIC inliner would not be able to inline them back
367 at each of their call sites, thus creating artificial non-constant
368 size expressions which would trigger nasty problems later on. */
369
370 void
371 finalize_size_functions (void)
372 {
373 unsigned int i;
374 tree fndecl;
375
376   for (i = 0; VEC_iterate (tree, size_functions, i, fndecl); i++)
377 {
378 dump_function (TDI_original, fndecl);
379 gimplify_function_tree (fndecl);
380 dump_function (TDI_generic, fndecl);
381 cgraph_finalize_function (fndecl, false);
382 }
383
384 VEC_free (tree, gc, size_functions);
385 }
386 \f
387 /* Return the machine mode to use for a nonscalar of SIZE bits. The
388 mode must be in class MCLASS, and have exactly that many value bits;
389 it may have padding as well. If LIMIT is nonzero, modes of wider
390 than MAX_FIXED_MODE_SIZE will not be used. */
391
392 enum machine_mode
393 mode_for_size (unsigned int size, enum mode_class mclass, int limit)
394 {
395 enum machine_mode mode;
396
397 if (limit && size > MAX_FIXED_MODE_SIZE)
398 return BLKmode;
399
400 /* Get the first mode which has this size, in the specified class. */
401 for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
402 mode = GET_MODE_WIDER_MODE (mode))
403 if (GET_MODE_PRECISION (mode) == size)
404 return mode;
405
406 return BLKmode;
407 }
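/* Examples (illustrative, assuming a typical target with 8-bit QImode,
   16-bit HImode and 32-bit SImode):

     mode_for_size (32, MODE_INT, 0)   => SImode
     mode_for_size (24, MODE_INT, 0)   => BLKmode (no integer mode has
                                          exactly 24 value bits)
     mode_for_size (64, MODE_INT, 1)   => BLKmode whenever 64 exceeds
                                          MAX_FIXED_MODE_SIZE.  */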
408
409 /* Similar, except passed a tree node. */
410
411 enum machine_mode
412 mode_for_size_tree (const_tree size, enum mode_class mclass, int limit)
413 {
414 unsigned HOST_WIDE_INT uhwi;
415 unsigned int ui;
416
417 if (!host_integerp (size, 1))
418 return BLKmode;
419 uhwi = tree_low_cst (size, 1);
420 ui = uhwi;
421 if (uhwi != ui)
422 return BLKmode;
423 return mode_for_size (ui, mclass, limit);
424 }
425
426 /* Similar, but never return BLKmode; return the narrowest mode that
427 contains at least the requested number of value bits. */
428
429 enum machine_mode
430 smallest_mode_for_size (unsigned int size, enum mode_class mclass)
431 {
432 enum machine_mode mode;
433
434 /* Get the first mode which has at least this size, in the
435 specified class. */
436 for (mode = GET_CLASS_NARROWEST_MODE (mclass); mode != VOIDmode;
437 mode = GET_MODE_WIDER_MODE (mode))
438 if (GET_MODE_PRECISION (mode) >= size)
439 return mode;
440
441 gcc_unreachable ();
442 }
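/* Example (illustrative): on the same typical target,
   smallest_mode_for_size (17, MODE_INT) returns SImode, the narrowest
   integer mode with at least 17 value bits.  Unlike mode_for_size it
   never returns BLKmode; it aborts if no mode in the class is wide
   enough.  */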
443
444 /* Find an integer mode of the exact same size, or BLKmode on failure. */
445
446 enum machine_mode
447 int_mode_for_mode (enum machine_mode mode)
448 {
449 switch (GET_MODE_CLASS (mode))
450 {
451 case MODE_INT:
452 case MODE_PARTIAL_INT:
453 break;
454
455 case MODE_COMPLEX_INT:
456 case MODE_COMPLEX_FLOAT:
457 case MODE_FLOAT:
458 case MODE_DECIMAL_FLOAT:
459 case MODE_VECTOR_INT:
460 case MODE_VECTOR_FLOAT:
461 case MODE_FRACT:
462 case MODE_ACCUM:
463 case MODE_UFRACT:
464 case MODE_UACCUM:
465 case MODE_VECTOR_FRACT:
466 case MODE_VECTOR_ACCUM:
467 case MODE_VECTOR_UFRACT:
468 case MODE_VECTOR_UACCUM:
469 mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
470 break;
471
472 case MODE_RANDOM:
473 if (mode == BLKmode)
474 break;
475
476 /* ... fall through ... */
477
478 case MODE_CC:
479 default:
480 gcc_unreachable ();
481 }
482
483 return mode;
484 }
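/* Examples (illustrative): int_mode_for_mode (SImode) returns SImode
   unchanged, since it is already in MODE_INT.  int_mode_for_mode
   (DFmode) asks mode_for_size for an integer mode with
   GET_MODE_BITSIZE (DFmode) bits, i.e. DImode on targets where doubles
   are 64 bits wide.  */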
485
486 /* Find a mode that is suitable for representing a vector with
487 NUNITS elements of mode INNERMODE. Returns BLKmode if there
488 is no suitable mode. */
489
490 enum machine_mode
491 mode_for_vector (enum machine_mode innermode, unsigned nunits)
492 {
493 enum machine_mode mode;
494
495 /* First, look for a supported vector type. */
496 if (SCALAR_FLOAT_MODE_P (innermode))
497 mode = MIN_MODE_VECTOR_FLOAT;
498 else if (SCALAR_FRACT_MODE_P (innermode))
499 mode = MIN_MODE_VECTOR_FRACT;
500 else if (SCALAR_UFRACT_MODE_P (innermode))
501 mode = MIN_MODE_VECTOR_UFRACT;
502 else if (SCALAR_ACCUM_MODE_P (innermode))
503 mode = MIN_MODE_VECTOR_ACCUM;
504 else if (SCALAR_UACCUM_MODE_P (innermode))
505 mode = MIN_MODE_VECTOR_UACCUM;
506 else
507 mode = MIN_MODE_VECTOR_INT;
508
509 /* Do not check vector_mode_supported_p here. We'll do that
510 later in vector_type_mode. */
511 for (; mode != VOIDmode ; mode = GET_MODE_WIDER_MODE (mode))
512 if (GET_MODE_NUNITS (mode) == nunits
513 && GET_MODE_INNER (mode) == innermode)
514 break;
515
516 /* For integers, try mapping it to a same-sized scalar mode. */
517 if (mode == VOIDmode
518 && GET_MODE_CLASS (innermode) == MODE_INT)
519 mode = mode_for_size (nunits * GET_MODE_BITSIZE (innermode),
520 MODE_INT, 0);
521
522 if (mode == VOIDmode
523 || (GET_MODE_CLASS (mode) == MODE_INT
524 && !have_regs_of_mode[mode]))
525 return BLKmode;
526
527 return mode;
528 }
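/* Example (illustrative): mode_for_vector (SImode, 4) looks for a
   vector mode with four SImode elements (V4SImode where the target
   defines one).  If no such vector mode exists, the integer fallback
   tries a 128-bit scalar mode, and BLKmode is returned if that mode is
   not available in registers either.  */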
529
530 /* Return the alignment of MODE. This will be bounded by 1 and
531 BIGGEST_ALIGNMENT. */
532
533 unsigned int
534 get_mode_alignment (enum machine_mode mode)
535 {
536 return MIN (BIGGEST_ALIGNMENT, MAX (1, mode_base_align[mode]*BITS_PER_UNIT));
537 }
538
539 /* Return the natural mode of an array, given that it is SIZE bytes in
540 total and has elements of type ELEM_TYPE. */
541
542 static enum machine_mode
543 mode_for_array (tree elem_type, tree size)
544 {
545 tree elem_size;
546 unsigned HOST_WIDE_INT int_size, int_elem_size;
547 bool limit_p;
548
549 /* One-element arrays get the component type's mode. */
550 elem_size = TYPE_SIZE (elem_type);
551 if (simple_cst_equal (size, elem_size))
552 return TYPE_MODE (elem_type);
553
554 limit_p = true;
555 if (host_integerp (size, 1) && host_integerp (elem_size, 1))
556 {
557 int_size = tree_low_cst (size, 1);
558 int_elem_size = tree_low_cst (elem_size, 1);
559 if (int_elem_size > 0
560 && int_size % int_elem_size == 0
561 && targetm.array_mode_supported_p (TYPE_MODE (elem_type),
562 int_size / int_elem_size))
563 limit_p = false;
564 }
565 return mode_for_size_tree (size, MODE_INT, limit_p);
566 }
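/* Examples (illustrative): for `double a[1]' the size equals the
   element size, so the array simply gets DFmode.  For `char a[4]',
   SIZE is 32 bits; unless the target's array_mode_supported_p hook
   accepts a 4-element QImode array, the MAX_FIXED_MODE_SIZE limit
   stays in effect and mode_for_size_tree typically yields SImode.  */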
567 \f
568 /* Subroutine of layout_decl: Force alignment required for the data type.
569 But if the decl itself wants greater alignment, don't override that. */
570
571 static inline void
572 do_type_align (tree type, tree decl)
573 {
574 if (TYPE_ALIGN (type) > DECL_ALIGN (decl))
575 {
576 DECL_ALIGN (decl) = TYPE_ALIGN (type);
577 if (TREE_CODE (decl) == FIELD_DECL)
578 DECL_USER_ALIGN (decl) = TYPE_USER_ALIGN (type);
579 }
580 }
581
582 /* Set the size, mode and alignment of a ..._DECL node.
583 TYPE_DECL does need this for C++.
584 Note that LABEL_DECL and CONST_DECL nodes do not need this,
585 and FUNCTION_DECL nodes have them set up in a special (and simple) way.
586 Don't call layout_decl for them.
587
588 KNOWN_ALIGN is the amount of alignment we can assume this
589 decl has with no special effort. It is relevant only for FIELD_DECLs
590 and depends on the previous fields.
591 All that matters about KNOWN_ALIGN is which powers of 2 divide it.
592 If KNOWN_ALIGN is 0, it means, "as much alignment as you like":
593 the record will be aligned to suit. */
594
595 void
596 layout_decl (tree decl, unsigned int known_align)
597 {
598 tree type = TREE_TYPE (decl);
599 enum tree_code code = TREE_CODE (decl);
600 rtx rtl = NULL_RTX;
601 location_t loc = DECL_SOURCE_LOCATION (decl);
602
603 if (code == CONST_DECL)
604 return;
605
606 gcc_assert (code == VAR_DECL || code == PARM_DECL || code == RESULT_DECL
607 	      || code == TYPE_DECL || code == FIELD_DECL);
608
609 rtl = DECL_RTL_IF_SET (decl);
610
611 if (type == error_mark_node)
612 type = void_type_node;
613
614 /* Usually the size and mode come from the data type without change,
615 however, the front-end may set the explicit width of the field, so its
616 size may not be the same as the size of its type. This happens with
617 bitfields, of course (an `int' bitfield may be only 2 bits, say), but it
618 also happens with other fields. For example, the C++ front-end creates
619 zero-sized fields corresponding to empty base classes, and depends on
620 layout_type setting DECL_FIELD_BITPOS correctly for the field. Set the
621 size in bytes from the size in bits. If we have already set the mode,
622 don't set it again since we can be called twice for FIELD_DECLs. */
623
624 DECL_UNSIGNED (decl) = TYPE_UNSIGNED (type);
625 if (DECL_MODE (decl) == VOIDmode)
626 DECL_MODE (decl) = TYPE_MODE (type);
627
628 if (DECL_SIZE (decl) == 0)
629 {
630 DECL_SIZE (decl) = TYPE_SIZE (type);
631 DECL_SIZE_UNIT (decl) = TYPE_SIZE_UNIT (type);
632 }
633 else if (DECL_SIZE_UNIT (decl) == 0)
634 DECL_SIZE_UNIT (decl)
635 = fold_convert_loc (loc, sizetype,
636 size_binop_loc (loc, CEIL_DIV_EXPR, DECL_SIZE (decl),
637 bitsize_unit_node));
638
639 if (code != FIELD_DECL)
640 /* For non-fields, update the alignment from the type. */
641 do_type_align (type, decl);
642 else
643 /* For fields, it's a bit more complicated... */
644 {
645 bool old_user_align = DECL_USER_ALIGN (decl);
646 bool zero_bitfield = false;
647 bool packed_p = DECL_PACKED (decl);
648 unsigned int mfa;
649
650 if (DECL_BIT_FIELD (decl))
651 {
652 DECL_BIT_FIELD_TYPE (decl) = type;
653
654 /* A zero-length bit-field affects the alignment of the next
655 field. In essence such bit-fields are not influenced by
656 any packing due to #pragma pack or attribute packed. */
657 if (integer_zerop (DECL_SIZE (decl))
658 && ! targetm.ms_bitfield_layout_p (DECL_FIELD_CONTEXT (decl)))
659 {
660 zero_bitfield = true;
661 packed_p = false;
662 #ifdef PCC_BITFIELD_TYPE_MATTERS
663 if (PCC_BITFIELD_TYPE_MATTERS)
664 do_type_align (type, decl);
665 else
666 #endif
667 {
668 #ifdef EMPTY_FIELD_BOUNDARY
669 if (EMPTY_FIELD_BOUNDARY > DECL_ALIGN (decl))
670 {
671 DECL_ALIGN (decl) = EMPTY_FIELD_BOUNDARY;
672 DECL_USER_ALIGN (decl) = 0;
673 }
674 #endif
675 }
676 }
677
678 /* See if we can use an ordinary integer mode for a bit-field.
679 Conditions are: a fixed size that is correct for another mode,
680 occupying a complete byte or bytes on proper boundary,
681 and not volatile or not -fstrict-volatile-bitfields. */
682 if (TYPE_SIZE (type) != 0
683 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
684 && GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
685 && !(TREE_THIS_VOLATILE (decl)
686 && flag_strict_volatile_bitfields > 0))
687 {
688 enum machine_mode xmode
689 = mode_for_size_tree (DECL_SIZE (decl), MODE_INT, 1);
690 unsigned int xalign = GET_MODE_ALIGNMENT (xmode);
691
692 if (xmode != BLKmode
693 && !(xalign > BITS_PER_UNIT && DECL_PACKED (decl))
694 && (known_align == 0 || known_align >= xalign))
695 {
696 DECL_ALIGN (decl) = MAX (xalign, DECL_ALIGN (decl));
697 DECL_MODE (decl) = xmode;
698 DECL_BIT_FIELD (decl) = 0;
699 }
700 }
701
702 /* Turn off DECL_BIT_FIELD if we won't need it set. */
703 if (TYPE_MODE (type) == BLKmode && DECL_MODE (decl) == BLKmode
704 && known_align >= TYPE_ALIGN (type)
705 && DECL_ALIGN (decl) >= TYPE_ALIGN (type))
706 DECL_BIT_FIELD (decl) = 0;
707 }
708 else if (packed_p && DECL_USER_ALIGN (decl))
709 /* Don't touch DECL_ALIGN. For other packed fields, go ahead and
710 round up; we'll reduce it again below. We want packing to
711 supersede USER_ALIGN inherited from the type, but defer to
712 alignment explicitly specified on the field decl. */;
713 else
714 do_type_align (type, decl);
715
716 /* If the field is packed and not explicitly aligned, give it the
717 minimum alignment. Note that do_type_align may set
718 DECL_USER_ALIGN, so we need to check old_user_align instead. */
719 if (packed_p
720 && !old_user_align)
721 DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), BITS_PER_UNIT);
722
723 if (! packed_p && ! DECL_USER_ALIGN (decl))
724 {
725      /* Some targets (e.g. i386, VMS) limit struct field alignment
726 to a lower boundary than alignment of variables unless
727 it was overridden by attribute aligned. */
728 #ifdef BIGGEST_FIELD_ALIGNMENT
729 DECL_ALIGN (decl)
730 = MIN (DECL_ALIGN (decl), (unsigned) BIGGEST_FIELD_ALIGNMENT);
731 #endif
732 #ifdef ADJUST_FIELD_ALIGN
733 DECL_ALIGN (decl) = ADJUST_FIELD_ALIGN (decl, DECL_ALIGN (decl));
734 #endif
735 }
736
737 if (zero_bitfield)
738 mfa = initial_max_fld_align * BITS_PER_UNIT;
739 else
740 mfa = maximum_field_alignment;
741 /* Should this be controlled by DECL_USER_ALIGN, too? */
742 if (mfa != 0)
743 DECL_ALIGN (decl) = MIN (DECL_ALIGN (decl), mfa);
744 }
745
746 /* Evaluate nonconstant size only once, either now or as soon as safe. */
747 if (DECL_SIZE (decl) != 0 && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
748 DECL_SIZE (decl) = variable_size (DECL_SIZE (decl));
749 if (DECL_SIZE_UNIT (decl) != 0
750 && TREE_CODE (DECL_SIZE_UNIT (decl)) != INTEGER_CST)
751 DECL_SIZE_UNIT (decl) = variable_size (DECL_SIZE_UNIT (decl));
752
753 /* If requested, warn about definitions of large data objects. */
754 if (warn_larger_than
755 && (code == VAR_DECL || code == PARM_DECL)
756 && ! DECL_EXTERNAL (decl))
757 {
758 tree size = DECL_SIZE_UNIT (decl);
759
760 if (size != 0 && TREE_CODE (size) == INTEGER_CST
761 && compare_tree_int (size, larger_than_size) > 0)
762 {
763 int size_as_int = TREE_INT_CST_LOW (size);
764
765 if (compare_tree_int (size, size_as_int) == 0)
766 warning (OPT_Wlarger_than_, "size of %q+D is %d bytes", decl, size_as_int);
767 else
768 warning (OPT_Wlarger_than_, "size of %q+D is larger than %wd bytes",
769 decl, larger_than_size);
770 }
771 }
772
773 /* If the RTL was already set, update its mode and mem attributes. */
774 if (rtl)
775 {
776 PUT_MODE (rtl, DECL_MODE (decl));
777 SET_DECL_RTL (decl, 0);
778 set_mem_attributes (rtl, decl, 1);
779 SET_DECL_RTL (decl, rtl);
780 }
781 }
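/* Example (illustrative): for a field declared `int f : 16;' whose
   16-bit DECL_SIZE matches HImode exactly and which lands on a
   sufficiently aligned boundary (KNOWN_ALIGN >= 16), the bit-field
   path above switches DECL_MODE to HImode and clears DECL_BIT_FIELD,
   so later RTL expansion can use ordinary halfword accesses instead of
   bit-field insert/extract sequences.  The outcome depends on the
   target's alignment rules and on -fstrict-volatile-bitfields.  */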
782
783 /* Given a VAR_DECL, PARM_DECL or RESULT_DECL, clears the results of
784 a previous call to layout_decl and calls it again. */
785
786 void
787 relayout_decl (tree decl)
788 {
789 DECL_SIZE (decl) = DECL_SIZE_UNIT (decl) = 0;
790 DECL_MODE (decl) = VOIDmode;
791 if (!DECL_USER_ALIGN (decl))
792 DECL_ALIGN (decl) = 0;
793 SET_DECL_RTL (decl, 0);
794
795 layout_decl (decl, 0);
796 }
797 \f
798 /* Begin laying out type T, which may be a RECORD_TYPE, UNION_TYPE, or
799 QUAL_UNION_TYPE. Return a pointer to a struct record_layout_info which
800 is to be passed to all other layout functions for this record. It is the
801 responsibility of the caller to call `free' for the storage returned.
802 Note that garbage collection is not permitted until we finish laying
803 out the record. */
804
805 record_layout_info
806 start_record_layout (tree t)
807 {
808 record_layout_info rli = XNEW (struct record_layout_info_s);
809
810 rli->t = t;
811
812 /* If the type has a minimum specified alignment (via an attribute
813 declaration, for example) use it -- otherwise, start with a
814 one-byte alignment. */
815 rli->record_align = MAX (BITS_PER_UNIT, TYPE_ALIGN (t));
816 rli->unpacked_align = rli->record_align;
817 rli->offset_align = MAX (rli->record_align, BIGGEST_ALIGNMENT);
818
819 #ifdef STRUCTURE_SIZE_BOUNDARY
820 /* Packed structures don't need to have minimum size. */
821 if (! TYPE_PACKED (t))
822 {
823 unsigned tmp;
824
825 /* #pragma pack overrides STRUCTURE_SIZE_BOUNDARY. */
826 tmp = (unsigned) STRUCTURE_SIZE_BOUNDARY;
827 if (maximum_field_alignment != 0)
828 tmp = MIN (tmp, maximum_field_alignment);
829 rli->record_align = MAX (rli->record_align, tmp);
830 }
831 #endif
832
833 rli->offset = size_zero_node;
834 rli->bitpos = bitsize_zero_node;
835 rli->prev_field = 0;
836 rli->pending_statics = NULL;
837 rli->packed_maybe_necessary = 0;
838 rli->remaining_in_alignment = 0;
839
840 return rli;
841 }
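/* Illustrative call sequence (in GCC proper this is driven by
   layout_type through finish_record_layout, defined later in this
   file):

     record_layout_info rli = start_record_layout (t);
     for (field = TYPE_FIELDS (t); field; field = DECL_CHAIN (field))
       place_field (rli, field);
     ...then the record's size, mode and alignment are finalized and
     the record_layout_info structure is freed.  */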
842
843 /* These four routines perform computations that convert between
844 the offset/bitpos forms and byte and bit offsets. */
845
846 tree
847 bit_from_pos (tree offset, tree bitpos)
848 {
849 return size_binop (PLUS_EXPR, bitpos,
850 size_binop (MULT_EXPR,
851 fold_convert (bitsizetype, offset),
852 bitsize_unit_node));
853 }
854
855 tree
856 byte_from_pos (tree offset, tree bitpos)
857 {
858 return size_binop (PLUS_EXPR, offset,
859 fold_convert (sizetype,
860 size_binop (TRUNC_DIV_EXPR, bitpos,
861 bitsize_unit_node)));
862 }
863
864 void
865 pos_from_bit (tree *poffset, tree *pbitpos, unsigned int off_align,
866 tree pos)
867 {
868 *poffset = size_binop (MULT_EXPR,
869 fold_convert (sizetype,
870 size_binop (FLOOR_DIV_EXPR, pos,
871 bitsize_int (off_align))),
872 size_int (off_align / BITS_PER_UNIT));
873 *pbitpos = size_binop (FLOOR_MOD_EXPR, pos, bitsize_int (off_align));
874 }
875
876 /* Given a pointer to bit and byte offsets and an offset alignment,
877 normalize the offsets so they are within the alignment. */
878
879 void
880 normalize_offset (tree *poffset, tree *pbitpos, unsigned int off_align)
881 {
882 /* If the bit position is now larger than it should be, adjust it
883 downwards. */
884 if (compare_tree_int (*pbitpos, off_align) >= 0)
885 {
886 tree extra_aligns = size_binop (FLOOR_DIV_EXPR, *pbitpos,
887 bitsize_int (off_align));
888
889 *poffset
890 = size_binop (PLUS_EXPR, *poffset,
891 size_binop (MULT_EXPR,
892 fold_convert (sizetype, extra_aligns),
893 size_int (off_align / BITS_PER_UNIT)));
894
895 *pbitpos
896 = size_binop (FLOOR_MOD_EXPR, *pbitpos, bitsize_int (off_align));
897 }
898 }
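/* Worked example (illustrative, assuming BITS_PER_UNIT == 8): with
   OFFSET = 2 bytes and BITPOS = 5 bits,

     bit_from_pos  => 2 * 8 + 5 = 21 bits
     byte_from_pos => 2 + 5 / 8 = 2 bytes.

   pos_from_bit with POS = 70 and OFF_ALIGN = 32 splits the position
   into *POFFSET = (70 / 32) * 4 = 8 bytes and *PBITPOS = 70 % 32 = 6
   bits; normalize_offset performs the same folding incrementally once
   BITPOS has grown to OFF_ALIGN or beyond.  */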
899
900 /* Print debugging information about the information in RLI. */
901
902 DEBUG_FUNCTION void
903 debug_rli (record_layout_info rli)
904 {
905 print_node_brief (stderr, "type", rli->t, 0);
906 print_node_brief (stderr, "\noffset", rli->offset, 0);
907 print_node_brief (stderr, " bitpos", rli->bitpos, 0);
908
909 fprintf (stderr, "\naligns: rec = %u, unpack = %u, off = %u\n",
910 rli->record_align, rli->unpacked_align,
911 rli->offset_align);
912
913   /* The ms_struct code is the only code that uses this.  */
914 if (targetm.ms_bitfield_layout_p (rli->t))
915 fprintf (stderr, "remaining in alignment = %u\n", rli->remaining_in_alignment);
916
917 if (rli->packed_maybe_necessary)
918 fprintf (stderr, "packed may be necessary\n");
919
920 if (!VEC_empty (tree, rli->pending_statics))
921 {
922 fprintf (stderr, "pending statics:\n");
923 debug_vec_tree (rli->pending_statics);
924 }
925 }
926
927 /* Given an RLI with a possibly-incremented BITPOS, adjust OFFSET and
928 BITPOS if necessary to keep BITPOS below OFFSET_ALIGN. */
929
930 void
931 normalize_rli (record_layout_info rli)
932 {
933 normalize_offset (&rli->offset, &rli->bitpos, rli->offset_align);
934 }
935
936 /* Returns the size in bytes allocated so far. */
937
938 tree
939 rli_size_unit_so_far (record_layout_info rli)
940 {
941 return byte_from_pos (rli->offset, rli->bitpos);
942 }
943
944 /* Returns the size in bits allocated so far. */
945
946 tree
947 rli_size_so_far (record_layout_info rli)
948 {
949 return bit_from_pos (rli->offset, rli->bitpos);
950 }
951
952 /* FIELD is about to be added to RLI->T. The alignment (in bits) of
953 the next available location within the record is given by KNOWN_ALIGN.
954 Update the variable alignment fields in RLI, and return the alignment
955 to give the FIELD. */
956
957 unsigned int
958 update_alignment_for_field (record_layout_info rli, tree field,
959 unsigned int known_align)
960 {
961 /* The alignment required for FIELD. */
962 unsigned int desired_align;
963 /* The type of this field. */
964 tree type = TREE_TYPE (field);
965 /* True if the field was explicitly aligned by the user. */
966 bool user_align;
967 bool is_bitfield;
968
969   /* Do not attempt to align an ERROR_MARK node.  */
970 if (TREE_CODE (type) == ERROR_MARK)
971 return 0;
972
973 /* Lay out the field so we know what alignment it needs. */
974 layout_decl (field, known_align);
975 desired_align = DECL_ALIGN (field);
976 user_align = DECL_USER_ALIGN (field);
977
978 is_bitfield = (type != error_mark_node
979 && DECL_BIT_FIELD_TYPE (field)
980 && ! integer_zerop (TYPE_SIZE (type)));
981
982 /* Record must have at least as much alignment as any field.
983 Otherwise, the alignment of the field within the record is
984 meaningless. */
985 if (targetm.ms_bitfield_layout_p (rli->t))
986 {
987 /* Here, the alignment of the underlying type of a bitfield can
988 affect the alignment of a record; even a zero-sized field
989 can do this. The alignment should be to the alignment of
990 the type, except that for zero-size bitfields this only
991 applies if there was an immediately prior, nonzero-size
992 bitfield. (That's the way it is, experimentally.) */
993 if ((!is_bitfield && !DECL_PACKED (field))
994 || (!integer_zerop (DECL_SIZE (field))
995 ? !DECL_PACKED (field)
996 : (rli->prev_field
997 && DECL_BIT_FIELD_TYPE (rli->prev_field)
998 && ! integer_zerop (DECL_SIZE (rli->prev_field)))))
999 {
1000 unsigned int type_align = TYPE_ALIGN (type);
1001 type_align = MAX (type_align, desired_align);
1002 if (maximum_field_alignment != 0)
1003 type_align = MIN (type_align, maximum_field_alignment);
1004 rli->record_align = MAX (rli->record_align, type_align);
1005 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
1006 }
1007 }
1008 #ifdef PCC_BITFIELD_TYPE_MATTERS
1009 else if (is_bitfield && PCC_BITFIELD_TYPE_MATTERS)
1010 {
1011 /* Named bit-fields cause the entire structure to have the
1012 alignment implied by their type. Some targets also apply the same
1013 rules to unnamed bitfields. */
1014 if (DECL_NAME (field) != 0
1015 || targetm.align_anon_bitfield ())
1016 {
1017 unsigned int type_align = TYPE_ALIGN (type);
1018
1019 #ifdef ADJUST_FIELD_ALIGN
1020 if (! TYPE_USER_ALIGN (type))
1021 type_align = ADJUST_FIELD_ALIGN (field, type_align);
1022 #endif
1023
1024 	     /* Targets might choose to handle unnamed and hence possibly
1025 		zero-width bitfields.  Those are not influenced by #pragmas
1026 or packed attributes. */
1027 if (integer_zerop (DECL_SIZE (field)))
1028 {
1029 if (initial_max_fld_align)
1030 type_align = MIN (type_align,
1031 initial_max_fld_align * BITS_PER_UNIT);
1032 }
1033 else if (maximum_field_alignment != 0)
1034 type_align = MIN (type_align, maximum_field_alignment);
1035 else if (DECL_PACKED (field))
1036 type_align = MIN (type_align, BITS_PER_UNIT);
1037
1038 /* The alignment of the record is increased to the maximum
1039 of the current alignment, the alignment indicated on the
1040 field (i.e., the alignment specified by an __aligned__
1041 attribute), and the alignment indicated by the type of
1042 the field. */
1043 rli->record_align = MAX (rli->record_align, desired_align);
1044 rli->record_align = MAX (rli->record_align, type_align);
1045
1046 if (warn_packed)
1047 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
1048 user_align |= TYPE_USER_ALIGN (type);
1049 }
1050 }
1051 #endif
1052 else
1053 {
1054 rli->record_align = MAX (rli->record_align, desired_align);
1055 rli->unpacked_align = MAX (rli->unpacked_align, TYPE_ALIGN (type));
1056 }
1057
1058 TYPE_USER_ALIGN (rli->t) |= user_align;
1059
1060 return desired_align;
1061 }
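/* Example (illustrative): on a PCC_BITFIELD_TYPE_MATTERS target, a
   named member `int f : 3;' raises the record's alignment to at least
   the alignment of `int' (typically 32 bits) even though the field
   occupies only 3 bits; an unnamed bit-field does so only when the
   align_anon_bitfield target hook says it should.  */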
1062
1063 /* Called from place_field to handle unions. */
1064
1065 static void
1066 place_union_field (record_layout_info rli, tree field)
1067 {
1068 update_alignment_for_field (rli, field, /*known_align=*/0);
1069
1070 DECL_FIELD_OFFSET (field) = size_zero_node;
1071 DECL_FIELD_BIT_OFFSET (field) = bitsize_zero_node;
1072 SET_DECL_OFFSET_ALIGN (field, BIGGEST_ALIGNMENT);
1073
1074 /* If this is an ERROR_MARK return *after* having set the
1075 field at the start of the union. This helps when parsing
1076 invalid fields. */
1077 if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK)
1078 return;
1079
1080 /* We assume the union's size will be a multiple of a byte so we don't
1081 bother with BITPOS. */
1082 if (TREE_CODE (rli->t) == UNION_TYPE)
1083 rli->offset = size_binop (MAX_EXPR, rli->offset, DECL_SIZE_UNIT (field));
1084 else if (TREE_CODE (rli->t) == QUAL_UNION_TYPE)
1085 rli->offset = fold_build3 (COND_EXPR, sizetype, DECL_QUALIFIER (field),
1086 DECL_SIZE_UNIT (field), rli->offset);
1087 }
1088
1089 #if defined (PCC_BITFIELD_TYPE_MATTERS) || defined (BITFIELD_NBYTES_LIMITED)
1090 /* A bitfield of SIZE with a required access alignment of ALIGN is allocated
1091 at BYTE_OFFSET / BIT_OFFSET. Return nonzero if the field would span more
1092 units of alignment than the underlying TYPE. */
1093 static int
1094 excess_unit_span (HOST_WIDE_INT byte_offset, HOST_WIDE_INT bit_offset,
1095 HOST_WIDE_INT size, HOST_WIDE_INT align, tree type)
1096 {
1097 /* Note that the calculation of OFFSET might overflow; we calculate it so
1098 that we still get the right result as long as ALIGN is a power of two. */
1099 unsigned HOST_WIDE_INT offset = byte_offset * BITS_PER_UNIT + bit_offset;
1100
1101 offset = offset % align;
1102 return ((offset + size + align - 1) / align
1103 > ((unsigned HOST_WIDE_INT) tree_low_cst (TYPE_SIZE (type), 1)
1104 / align));
1105 }
1106 #endif
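/* Worked example (illustrative, 32-bit `int'): a field `int f : 4;'
   proposed at BYTE_OFFSET 3 and BIT_OFFSET 6 starts at bit 30 of its
   32-bit unit.  Here OFFSET % ALIGN = 30 and
   (30 + 4 + 31) / 32 = 2 > 32 / 32 = 1, so the field would straddle
   two units and place_field rounds the bit position up to the next
   32-bit boundary instead.  */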
1107
1108 /* RLI contains information about the layout of a RECORD_TYPE. FIELD
1109 is a FIELD_DECL to be added after those fields already present in
1110 T. (FIELD is not actually added to the TYPE_FIELDS list here;
1111 callers that desire that behavior must manually perform that step.) */
1112
1113 void
1114 place_field (record_layout_info rli, tree field)
1115 {
1116 /* The alignment required for FIELD. */
1117 unsigned int desired_align;
1118 /* The alignment FIELD would have if we just dropped it into the
1119 record as it presently stands. */
1120 unsigned int known_align;
1121 unsigned int actual_align;
1122 /* The type of this field. */
1123 tree type = TREE_TYPE (field);
1124
1125 gcc_assert (TREE_CODE (field) != ERROR_MARK);
1126
1127 /* If FIELD is static, then treat it like a separate variable, not
1128 really like a structure field. If it is a FUNCTION_DECL, it's a
1129 method. In both cases, all we do is lay out the decl, and we do
1130 it *after* the record is laid out. */
1131 if (TREE_CODE (field) == VAR_DECL)
1132 {
1133 VEC_safe_push (tree, gc, rli->pending_statics, field);
1134 return;
1135 }
1136
1137 /* Enumerators and enum types which are local to this class need not
1138 be laid out. Likewise for initialized constant fields. */
1139 else if (TREE_CODE (field) != FIELD_DECL)
1140 return;
1141
1142 /* Unions are laid out very differently than records, so split
1143 that code off to another function. */
1144 else if (TREE_CODE (rli->t) != RECORD_TYPE)
1145 {
1146 place_union_field (rli, field);
1147 return;
1148 }
1149
1150 else if (TREE_CODE (type) == ERROR_MARK)
1151 {
1152 /* Place this field at the current allocation position, so we
1153 maintain monotonicity. */
1154 DECL_FIELD_OFFSET (field) = rli->offset;
1155 DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
1156 SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
1157 return;
1158 }
1159
1160 /* Work out the known alignment so far. Note that A & (-A) is the
1161 value of the least-significant bit in A that is one. */
1162 if (! integer_zerop (rli->bitpos))
1163 known_align = (tree_low_cst (rli->bitpos, 1)
1164 & - tree_low_cst (rli->bitpos, 1));
1165 else if (integer_zerop (rli->offset))
1166 known_align = 0;
1167 else if (host_integerp (rli->offset, 1))
1168 known_align = (BITS_PER_UNIT
1169 * (tree_low_cst (rli->offset, 1)
1170 & - tree_low_cst (rli->offset, 1)));
1171 else
1172 known_align = rli->offset_align;
1173
1174 desired_align = update_alignment_for_field (rli, field, known_align);
1175 if (known_align == 0)
1176 known_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
1177
1178 if (warn_packed && DECL_PACKED (field))
1179 {
1180 if (known_align >= TYPE_ALIGN (type))
1181 {
1182 if (TYPE_ALIGN (type) > desired_align)
1183 {
1184 if (STRICT_ALIGNMENT)
1185 warning (OPT_Wattributes, "packed attribute causes "
1186 "inefficient alignment for %q+D", field);
1187 /* Don't warn if DECL_PACKED was set by the type. */
1188 else if (!TYPE_PACKED (rli->t))
1189 warning (OPT_Wattributes, "packed attribute is "
1190 "unnecessary for %q+D", field);
1191 }
1192 }
1193 else
1194 rli->packed_maybe_necessary = 1;
1195 }
1196
1197 /* Does this field automatically have alignment it needs by virtue
1198 of the fields that precede it and the record's own alignment?
1199 We already align ms_struct fields, so don't re-align them. */
1200 if (known_align < desired_align
1201 && !targetm.ms_bitfield_layout_p (rli->t))
1202 {
1203 /* No, we need to skip space before this field.
1204 Bump the cumulative size to multiple of field alignment. */
1205
1206 if (DECL_SOURCE_LOCATION (field) != BUILTINS_LOCATION)
1207 warning (OPT_Wpadded, "padding struct to align %q+D", field);
1208
1209 /* If the alignment is still within offset_align, just align
1210 the bit position. */
1211 if (desired_align < rli->offset_align)
1212 rli->bitpos = round_up (rli->bitpos, desired_align);
1213 else
1214 {
1215 /* First adjust OFFSET by the partial bits, then align. */
1216 rli->offset
1217 = size_binop (PLUS_EXPR, rli->offset,
1218 fold_convert (sizetype,
1219 size_binop (CEIL_DIV_EXPR, rli->bitpos,
1220 bitsize_unit_node)));
1221 rli->bitpos = bitsize_zero_node;
1222
1223 rli->offset = round_up (rli->offset, desired_align / BITS_PER_UNIT);
1224 }
1225
1226 if (! TREE_CONSTANT (rli->offset))
1227 rli->offset_align = desired_align;
1228
1229 }
1230
1231 /* Handle compatibility with PCC. Note that if the record has any
1232 variable-sized fields, we need not worry about compatibility. */
1233 #ifdef PCC_BITFIELD_TYPE_MATTERS
1234 if (PCC_BITFIELD_TYPE_MATTERS
1235 && ! targetm.ms_bitfield_layout_p (rli->t)
1236 && TREE_CODE (field) == FIELD_DECL
1237 && type != error_mark_node
1238 && DECL_BIT_FIELD (field)
1239 && (! DECL_PACKED (field)
1240 /* Enter for these packed fields only to issue a warning. */
1241 || TYPE_ALIGN (type) <= BITS_PER_UNIT)
1242 && maximum_field_alignment == 0
1243 && ! integer_zerop (DECL_SIZE (field))
1244 && host_integerp (DECL_SIZE (field), 1)
1245 && host_integerp (rli->offset, 1)
1246 && host_integerp (TYPE_SIZE (type), 1))
1247 {
1248 unsigned int type_align = TYPE_ALIGN (type);
1249 tree dsize = DECL_SIZE (field);
1250 HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
1251 HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
1252 HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);
1253
1254 #ifdef ADJUST_FIELD_ALIGN
1255 if (! TYPE_USER_ALIGN (type))
1256 type_align = ADJUST_FIELD_ALIGN (field, type_align);
1257 #endif
1258
1259 /* A bit field may not span more units of alignment of its type
1260 than its type itself. Advance to next boundary if necessary. */
1261 if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
1262 {
1263 if (DECL_PACKED (field))
1264 {
1265 if (warn_packed_bitfield_compat == 1)
1266 inform
1267 (input_location,
1268 "offset of packed bit-field %qD has changed in GCC 4.4",
1269 field);
1270 }
1271 else
1272 rli->bitpos = round_up (rli->bitpos, type_align);
1273 }
1274
1275 if (! DECL_PACKED (field))
1276 TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
1277 }
1278 #endif
1279
1280 #ifdef BITFIELD_NBYTES_LIMITED
1281 if (BITFIELD_NBYTES_LIMITED
1282 && ! targetm.ms_bitfield_layout_p (rli->t)
1283 && TREE_CODE (field) == FIELD_DECL
1284 && type != error_mark_node
1285 && DECL_BIT_FIELD_TYPE (field)
1286 && ! DECL_PACKED (field)
1287 && ! integer_zerop (DECL_SIZE (field))
1288 && host_integerp (DECL_SIZE (field), 1)
1289 && host_integerp (rli->offset, 1)
1290 && host_integerp (TYPE_SIZE (type), 1))
1291 {
1292 unsigned int type_align = TYPE_ALIGN (type);
1293 tree dsize = DECL_SIZE (field);
1294 HOST_WIDE_INT field_size = tree_low_cst (dsize, 1);
1295 HOST_WIDE_INT offset = tree_low_cst (rli->offset, 0);
1296 HOST_WIDE_INT bit_offset = tree_low_cst (rli->bitpos, 0);
1297
1298 #ifdef ADJUST_FIELD_ALIGN
1299 if (! TYPE_USER_ALIGN (type))
1300 type_align = ADJUST_FIELD_ALIGN (field, type_align);
1301 #endif
1302
1303 if (maximum_field_alignment != 0)
1304 type_align = MIN (type_align, maximum_field_alignment);
1305 /* ??? This test is opposite the test in the containing if
1306 statement, so this code is unreachable currently. */
1307 else if (DECL_PACKED (field))
1308 type_align = MIN (type_align, BITS_PER_UNIT);
1309
1310 /* A bit field may not span the unit of alignment of its type.
1311 Advance to next boundary if necessary. */
1312 if (excess_unit_span (offset, bit_offset, field_size, type_align, type))
1313 rli->bitpos = round_up (rli->bitpos, type_align);
1314
1315 TYPE_USER_ALIGN (rli->t) |= TYPE_USER_ALIGN (type);
1316 }
1317 #endif
1318
1319 /* See the docs for TARGET_MS_BITFIELD_LAYOUT_P for details.
1320 A subtlety:
1321 When a bit field is inserted into a packed record, the whole
1322 size of the underlying type is used by one or more same-size
1323      adjacent bitfields.  (That is, if it's long:3, 32 bits is
1324 used in the record, and any additional adjacent long bitfields are
1325 packed into the same chunk of 32 bits. However, if the size
1326 changes, a new field of that size is allocated.) In an unpacked
1327 record, this is the same as using alignment, but not equivalent
1328 when packing.
1329
1330 Note: for compatibility, we use the type size, not the type alignment
1331      to determine alignment, since that matches the documentation.  */
1332
1333 if (targetm.ms_bitfield_layout_p (rli->t))
1334 {
1335 tree prev_saved = rli->prev_field;
1336 tree prev_type = prev_saved ? DECL_BIT_FIELD_TYPE (prev_saved) : NULL;
1337
1338 /* This is a bitfield if it exists. */
1339 if (rli->prev_field)
1340 {
1341 /* If both are bitfields, nonzero, and the same size, this is
1342 the middle of a run. Zero declared size fields are special
1343 and handled as "end of run". (Note: it's nonzero declared
1344 size, but equal type sizes!) (Since we know that both
1345 the current and previous fields are bitfields by the
1346 time we check it, DECL_SIZE must be present for both.) */
1347 if (DECL_BIT_FIELD_TYPE (field)
1348 && !integer_zerop (DECL_SIZE (field))
1349 && !integer_zerop (DECL_SIZE (rli->prev_field))
1350 && host_integerp (DECL_SIZE (rli->prev_field), 0)
1351 && host_integerp (TYPE_SIZE (type), 0)
1352 && simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type)))
1353 {
1354 /* We're in the middle of a run of equal type size fields; make
1355 sure we realign if we run out of bits. (Not decl size,
1356 type size!) */
1357 HOST_WIDE_INT bitsize = tree_low_cst (DECL_SIZE (field), 1);
1358
1359 if (rli->remaining_in_alignment < bitsize)
1360 {
1361 HOST_WIDE_INT typesize = tree_low_cst (TYPE_SIZE (type), 1);
1362
1363 /* out of bits; bump up to next 'word'. */
1364 rli->bitpos
1365 = size_binop (PLUS_EXPR, rli->bitpos,
1366 bitsize_int (rli->remaining_in_alignment));
1367 rli->prev_field = field;
1368 if (typesize < bitsize)
1369 rli->remaining_in_alignment = 0;
1370 else
1371 rli->remaining_in_alignment = typesize - bitsize;
1372 }
1373 else
1374 rli->remaining_in_alignment -= bitsize;
1375 }
1376 else
1377 {
1378 /* End of a run: if leaving a run of bitfields of the same type
1379 size, we have to "use up" the rest of the bits of the type
1380 size.
1381
1382 Compute the new position as the sum of the size for the prior
1383 type and where we first started working on that type.
1384 Note: since the beginning of the field was aligned then
1385 of course the end will be too. No round needed. */
1386
1387 if (!integer_zerop (DECL_SIZE (rli->prev_field)))
1388 {
1389 rli->bitpos
1390 = size_binop (PLUS_EXPR, rli->bitpos,
1391 bitsize_int (rli->remaining_in_alignment));
1392 }
1393 else
1394 /* We "use up" size zero fields; the code below should behave
1395 as if the prior field was not a bitfield. */
1396 prev_saved = NULL;
1397
1398 /* Cause a new bitfield to be captured, either this time (if
1399 currently a bitfield) or next time we see one. */
1400 	      if (!DECL_BIT_FIELD_TYPE (field)
1401 || integer_zerop (DECL_SIZE (field)))
1402 rli->prev_field = NULL;
1403 }
1404
1405 normalize_rli (rli);
1406 }
1407
1408 /* If we're starting a new run of same size type bitfields
1409 (or a run of non-bitfields), set up the "first of the run"
1410 fields.
1411
1412 That is, if the current field is not a bitfield, or if there
1413      was a prior bitfield and the type sizes differ, or if there wasn't
1414      a prior bitfield and the size of the current field is nonzero.
1415
1416 Note: we must be sure to test ONLY the type size if there was
1417 a prior bitfield and ONLY for the current field being zero if
1418 there wasn't. */
1419
1420 if (!DECL_BIT_FIELD_TYPE (field)
1421 || (prev_saved != NULL
1422 ? !simple_cst_equal (TYPE_SIZE (type), TYPE_SIZE (prev_type))
1423 : !integer_zerop (DECL_SIZE (field)) ))
1424 {
1425 /* Never smaller than a byte for compatibility. */
1426 unsigned int type_align = BITS_PER_UNIT;
1427
1428 	     /* When not a bitfield, we could be seeing a flex array (with
1429 no DECL_SIZE). Since we won't be using remaining_in_alignment
1430 until we see a bitfield (and come by here again) we just skip
1431 calculating it. */
1432 if (DECL_SIZE (field) != NULL
1433 && host_integerp (TYPE_SIZE (TREE_TYPE (field)), 1)
1434 && host_integerp (DECL_SIZE (field), 1))
1435 {
1436 unsigned HOST_WIDE_INT bitsize
1437 = tree_low_cst (DECL_SIZE (field), 1);
1438 unsigned HOST_WIDE_INT typesize
1439 = tree_low_cst (TYPE_SIZE (TREE_TYPE (field)), 1);
1440
1441 if (typesize < bitsize)
1442 rli->remaining_in_alignment = 0;
1443 else
1444 rli->remaining_in_alignment = typesize - bitsize;
1445 }
1446
1447 /* Now align (conventionally) for the new type. */
1448 type_align = TYPE_ALIGN (TREE_TYPE (field));
1449
1450 if (maximum_field_alignment != 0)
1451 type_align = MIN (type_align, maximum_field_alignment);
1452
1453 rli->bitpos = round_up (rli->bitpos, type_align);
1454
1455 /* If we really aligned, don't allow subsequent bitfields
1456 to undo that. */
1457 rli->prev_field = NULL;
1458 }
1459 }
1460
1461 /* Offset so far becomes the position of this field after normalizing. */
1462 normalize_rli (rli);
1463 DECL_FIELD_OFFSET (field) = rli->offset;
1464 DECL_FIELD_BIT_OFFSET (field) = rli->bitpos;
1465 SET_DECL_OFFSET_ALIGN (field, rli->offset_align);
1466
1467 /* If this field ended up more aligned than we thought it would be (we
1468 approximate this by seeing if its position changed), lay out the field
1469 again; perhaps we can use an integral mode for it now. */
1470 if (! integer_zerop (DECL_FIELD_BIT_OFFSET (field)))
1471 actual_align = (tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1)
1472 & - tree_low_cst (DECL_FIELD_BIT_OFFSET (field), 1));
1473 else if (integer_zerop (DECL_FIELD_OFFSET (field)))
1474 actual_align = MAX (BIGGEST_ALIGNMENT, rli->record_align);
1475 else if (host_integerp (DECL_FIELD_OFFSET (field), 1))
1476 actual_align = (BITS_PER_UNIT
1477 * (tree_low_cst (DECL_FIELD_OFFSET (field), 1)
1478 & - tree_low_cst (DECL_FIELD_OFFSET (field), 1)));
1479 else
1480 actual_align = DECL_OFFSET_ALIGN (field);
1481   /* ACTUAL_ALIGN is still the actual alignment *within the record*.
1482      Store / extract bit-field operations will check the alignment of the
1483      record against the mode of bit fields.  */
1484
1485 if (known_align != actual_align)
1486 layout_decl (field, actual_align);
1487
1488 if (rli->prev_field == NULL && DECL_BIT_FIELD_TYPE (field))
1489 rli->prev_field = field;
1490
1491 /* Now add size of this field to the size of the record. If the size is
1492 not constant, treat the field as being a multiple of bytes and just
1493 adjust the offset, resetting the bit position. Otherwise, apportion the
1494 size amongst the bit position and offset. First handle the case of an
1495 unspecified size, which can happen when we have an invalid nested struct
1496 definition, such as struct j { struct j { int i; } }. The error message
1497 is printed in finish_struct. */
1498 if (DECL_SIZE (field) == 0)
1499 /* Do nothing. */;
1500 else if (TREE_CODE (DECL_SIZE (field)) != INTEGER_CST
1501 || TREE_OVERFLOW (DECL_SIZE (field)))
1502 {
1503 rli->offset
1504 = size_binop (PLUS_EXPR, rli->offset,
1505 fold_convert (sizetype,
1506 size_binop (CEIL_DIV_EXPR, rli->bitpos,
1507 bitsize_unit_node)));
1508 rli->offset
1509 = size_binop (PLUS_EXPR, rli->offset, DECL_SIZE_UNIT (field));
1510 rli->bitpos = bitsize_zero_node;
1511 rli->offset_align = MIN (rli->offset_align, desired_align);
1512 }
1513 else if (targetm.ms_bitfield_layout_p (rli->t))
1514 {
1515 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
1516
1517 /* If we ended a bitfield before the full length of the type then
1518 pad the struct out to the full length of the last type. */
1519 if ((DECL_CHAIN (field) == NULL
1520 || TREE_CODE (DECL_CHAIN (field)) != FIELD_DECL)
1521 && DECL_BIT_FIELD_TYPE (field)
1522 && !integer_zerop (DECL_SIZE (field)))
1523 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos,
1524 bitsize_int (rli->remaining_in_alignment));
1525
1526 normalize_rli (rli);
1527 }
1528 else
1529 {
1530 rli->bitpos = size_binop (PLUS_EXPR, rli->bitpos, DECL_SIZE (field));
1531 normalize_rli (rli);
1532 }
1533 }
1534
1535 /* Assuming that all the fields have been laid out, this function uses
1536 RLI to compute the final TYPE_SIZE, TYPE_ALIGN, etc. for the type
1537 indicated by RLI. */
1538
1539 static void
1540 finalize_record_size (record_layout_info rli)
1541 {
1542 tree unpadded_size, unpadded_size_unit;
1543
1544 /* Now we want just byte and bit offsets, so set the offset alignment
1545 to be a byte and then normalize. */
1546 rli->offset_align = BITS_PER_UNIT;
1547 normalize_rli (rli);
1548
1549 /* Determine the desired alignment. */
1550 #ifdef ROUND_TYPE_ALIGN
1551 TYPE_ALIGN (rli->t) = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t),
1552 rli->record_align);
1553 #else
1554 TYPE_ALIGN (rli->t) = MAX (TYPE_ALIGN (rli->t), rli->record_align);
1555 #endif
1556
1557 /* Compute the size so far. Be sure to allow for extra bits in the
1558 size in bytes. We have guaranteed above that it will be no more
1559 than a single byte. */
1560 unpadded_size = rli_size_so_far (rli);
1561 unpadded_size_unit = rli_size_unit_so_far (rli);
1562 if (! integer_zerop (rli->bitpos))
1563 unpadded_size_unit
1564 = size_binop (PLUS_EXPR, unpadded_size_unit, size_one_node);
1565
1566 /* Round the size up to be a multiple of the required alignment. */
1567 TYPE_SIZE (rli->t) = round_up (unpadded_size, TYPE_ALIGN (rli->t));
1568 TYPE_SIZE_UNIT (rli->t)
1569 = round_up (unpadded_size_unit, TYPE_ALIGN_UNIT (rli->t));
1570
1571 if (TREE_CONSTANT (unpadded_size)
1572 && simple_cst_equal (unpadded_size, TYPE_SIZE (rli->t)) == 0
1573 && input_location != BUILTINS_LOCATION)
1574 warning (OPT_Wpadded, "padding struct size to alignment boundary");
1575
1576 if (warn_packed && TREE_CODE (rli->t) == RECORD_TYPE
1577 && TYPE_PACKED (rli->t) && ! rli->packed_maybe_necessary
1578 && TREE_CONSTANT (unpadded_size))
1579 {
1580 tree unpacked_size;
1581
1582 #ifdef ROUND_TYPE_ALIGN
1583 rli->unpacked_align
1584 = ROUND_TYPE_ALIGN (rli->t, TYPE_ALIGN (rli->t), rli->unpacked_align);
1585 #else
1586 rli->unpacked_align = MAX (TYPE_ALIGN (rli->t), rli->unpacked_align);
1587 #endif
1588
1589 unpacked_size = round_up (TYPE_SIZE (rli->t), rli->unpacked_align);
1590 if (simple_cst_equal (unpacked_size, TYPE_SIZE (rli->t)))
1591 {
1592 if (TYPE_NAME (rli->t))
1593 {
1594 tree name;
1595
1596 if (TREE_CODE (TYPE_NAME (rli->t)) == IDENTIFIER_NODE)
1597 name = TYPE_NAME (rli->t);
1598 else
1599 name = DECL_NAME (TYPE_NAME (rli->t));
1600
1601 if (STRICT_ALIGNMENT)
1602 warning (OPT_Wpacked, "packed attribute causes inefficient "
1603 "alignment for %qE", name);
1604 else
1605 warning (OPT_Wpacked,
1606 "packed attribute is unnecessary for %qE", name);
1607 }
1608 else
1609 {
1610 if (STRICT_ALIGNMENT)
1611 warning (OPT_Wpacked,
1612 "packed attribute causes inefficient alignment");
1613 else
1614 warning (OPT_Wpacked, "packed attribute is unnecessary");
1615 }
1616 }
1617 }
1618 }
1619
1620 /* Compute the TYPE_MODE for the TYPE (which is a RECORD_TYPE). */
1621
1622 void
1623 compute_record_mode (tree type)
1624 {
1625 tree field;
1626 enum machine_mode mode = VOIDmode;
1627
1628 /* Most RECORD_TYPEs have BLKmode, so we start off assuming that.
1629 However, if possible, we use a mode that fits in a register
1630 instead, in order to allow for better optimization down the
1631 line. */
1632 SET_TYPE_MODE (type, BLKmode);
1633
1634 if (! host_integerp (TYPE_SIZE (type), 1))
1635 return;
1636
1637 /* A record which has any BLKmode members must itself be
1638 BLKmode; it can't go in a register. Unless the member is
1639 BLKmode only because it isn't aligned. */
1640 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
1641 {
1642 if (TREE_CODE (field) != FIELD_DECL)
1643 continue;
1644
1645 if (TREE_CODE (TREE_TYPE (field)) == ERROR_MARK
1646 || (TYPE_MODE (TREE_TYPE (field)) == BLKmode
1647 && ! TYPE_NO_FORCE_BLK (TREE_TYPE (field))
1648 && !(TYPE_SIZE (TREE_TYPE (field)) != 0
1649 && integer_zerop (TYPE_SIZE (TREE_TYPE (field)))))
1650 || ! host_integerp (bit_position (field), 1)
1651 || DECL_SIZE (field) == 0
1652 || ! host_integerp (DECL_SIZE (field), 1))
1653 return;
1654
1655 /* If this field is the whole struct, remember its mode so
1656 that, say, we can put a double in a class into a DF
1657 register instead of forcing it to live in the stack. */
1658 if (simple_cst_equal (TYPE_SIZE (type), DECL_SIZE (field)))
1659 mode = DECL_MODE (field);
1660
1661 #ifdef MEMBER_TYPE_FORCES_BLK
1662 /* With some targets, e.g. c4x, it is sub-optimal
1663 to access an aligned BLKmode structure as a scalar. */
1664
1665 if (MEMBER_TYPE_FORCES_BLK (field, mode))
1666 return;
1667 #endif /* MEMBER_TYPE_FORCES_BLK */
1668 }
1669
1670 /* If we only have one real field, use its mode if that mode's size
1671 matches the type's size. This only applies to RECORD_TYPE; it
1672 does not apply to unions. */
1673 if (TREE_CODE (type) == RECORD_TYPE && mode != VOIDmode
1674 && host_integerp (TYPE_SIZE (type), 1)
1675 && GET_MODE_BITSIZE (mode) == TREE_INT_CST_LOW (TYPE_SIZE (type)))
1676 SET_TYPE_MODE (type, mode);
1677 else
1678 SET_TYPE_MODE (type, mode_for_size_tree (TYPE_SIZE (type), MODE_INT, 1));
1679
1680 /* If structure's known alignment is less than what the scalar
1681 mode would need, and it matters, then stick with BLKmode. */
1682 if (TYPE_MODE (type) != BLKmode
1683 && STRICT_ALIGNMENT
1684 && ! (TYPE_ALIGN (type) >= BIGGEST_ALIGNMENT
1685 || TYPE_ALIGN (type) >= GET_MODE_ALIGNMENT (TYPE_MODE (type))))
1686 {
1687 /* If this is the only reason this type is BLKmode, then
1688 don't force containing types to be BLKmode. */
1689 TYPE_NO_FORCE_BLK (type) = 1;
1690 SET_TYPE_MODE (type, BLKmode);
1691 }
1692 }
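/* Illustrative sketch, not part of the original source: the effect of the
   mode selection above on two simple records (the actual modes are of course
   target-dependent).  */
#if 0 /* example only */
struct one_field { double d; };   /* The single field spans the whole record,
                                     so the record inherits DFmode and can be
                                     kept in a floating-point register.  */
struct two_fields { int a, b; };  /* No field spans the record; an integer
                                     mode of the full size (DImode on common
                                     targets) is used when one exists,
                                     otherwise the record stays BLKmode.  */
#endif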
1693
1694 /* Compute TYPE_SIZE and TYPE_ALIGN for TYPE, once it has been laid
1695 out. */
1696
1697 static void
1698 finalize_type_size (tree type)
1699 {
1700 /* Normally, use the alignment corresponding to the mode chosen.
1701 However, where strict alignment is not required, avoid
1702 over-aligning structures, since most compilers do not do this
1703 alignment. */
1704
1705 if (TYPE_MODE (type) != BLKmode && TYPE_MODE (type) != VOIDmode
1706 && (STRICT_ALIGNMENT
1707 || (TREE_CODE (type) != RECORD_TYPE && TREE_CODE (type) != UNION_TYPE
1708 && TREE_CODE (type) != QUAL_UNION_TYPE
1709 && TREE_CODE (type) != ARRAY_TYPE)))
1710 {
1711 unsigned mode_align = GET_MODE_ALIGNMENT (TYPE_MODE (type));
1712
1713 /* Don't override a larger alignment requirement coming from a user
1714 alignment of one of the fields. */
1715 if (mode_align >= TYPE_ALIGN (type))
1716 {
1717 TYPE_ALIGN (type) = mode_align;
1718 TYPE_USER_ALIGN (type) = 0;
1719 }
1720 }
1721
1722 /* Do machine-dependent extra alignment. */
1723 #ifdef ROUND_TYPE_ALIGN
1724 TYPE_ALIGN (type)
1725 = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (type), BITS_PER_UNIT);
1726 #endif
1727
1728 /* If we failed to find a simple way to calculate the unit size
1729 of the type, find it by division. */
1730 if (TYPE_SIZE_UNIT (type) == 0 && TYPE_SIZE (type) != 0)
1731 /* TYPE_SIZE (type) is computed in bitsizetype. After the division, the
1732 result will fit in sizetype. We will get more efficient code using
1733 sizetype, so we force a conversion. */
1734 TYPE_SIZE_UNIT (type)
1735 = fold_convert (sizetype,
1736 size_binop (FLOOR_DIV_EXPR, TYPE_SIZE (type),
1737 bitsize_unit_node));
1738
1739 if (TYPE_SIZE (type) != 0)
1740 {
1741 TYPE_SIZE (type) = round_up (TYPE_SIZE (type), TYPE_ALIGN (type));
1742 TYPE_SIZE_UNIT (type)
1743 = round_up (TYPE_SIZE_UNIT (type), TYPE_ALIGN_UNIT (type));
1744 }
1745
1746 /* Evaluate nonconstant sizes only once, either now or as soon as safe. */
1747 if (TYPE_SIZE (type) != 0 && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
1748 TYPE_SIZE (type) = variable_size (TYPE_SIZE (type));
1749 if (TYPE_SIZE_UNIT (type) != 0
1750 && TREE_CODE (TYPE_SIZE_UNIT (type)) != INTEGER_CST)
1751 TYPE_SIZE_UNIT (type) = variable_size (TYPE_SIZE_UNIT (type));
1752
1753 /* Also lay out any other variants of the type. */
1754 if (TYPE_NEXT_VARIANT (type)
1755 || type != TYPE_MAIN_VARIANT (type))
1756 {
1757 tree variant;
1758 /* Record layout info of this variant. */
1759 tree size = TYPE_SIZE (type);
1760 tree size_unit = TYPE_SIZE_UNIT (type);
1761 unsigned int align = TYPE_ALIGN (type);
1762 unsigned int user_align = TYPE_USER_ALIGN (type);
1763 enum machine_mode mode = TYPE_MODE (type);
1764
1765 /* Copy it into all variants. */
1766 for (variant = TYPE_MAIN_VARIANT (type);
1767 variant != 0;
1768 variant = TYPE_NEXT_VARIANT (variant))
1769 {
1770 TYPE_SIZE (variant) = size;
1771 TYPE_SIZE_UNIT (variant) = size_unit;
1772 TYPE_ALIGN (variant) = align;
1773 TYPE_USER_ALIGN (variant) = user_align;
1774 SET_TYPE_MODE (variant, mode);
1775 }
1776 }
1777 }
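/* Illustrative sketch, not part of the original source: if a front end leaves
   TYPE_SIZE_UNIT unset, the division above derives it, e.g. a 24-bit-wide
   type yields 24 / BITS_PER_UNIT = 3 bytes on targets with 8-bit units; both
   sizes are then rounded up to the final TYPE_ALIGN.  */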
1778
1779 /* Do all of the work required to layout the type indicated by RLI,
1780 once the fields have been laid out. This function will call `free'
1781 for RLI, unless FREE_P is false. Passing a value other than false
1782 for FREE_P is bad practice; this option only exists to support the
1783 G++ 3.2 ABI. */
1784
1785 void
1786 finish_record_layout (record_layout_info rli, int free_p)
1787 {
1788 tree variant;
1789
1790 /* Compute the final size. */
1791 finalize_record_size (rli);
1792
1793 /* Compute the TYPE_MODE for the record. */
1794 compute_record_mode (rli->t);
1795
1796 /* Perform any last tweaks to the TYPE_SIZE, etc. */
1797 finalize_type_size (rli->t);
1798
1799 /* Propagate TYPE_PACKED to variants. With C++ templates,
1800 handle_packed_attribute is too early to do this. */
1801 for (variant = TYPE_NEXT_VARIANT (rli->t); variant;
1802 variant = TYPE_NEXT_VARIANT (variant))
1803 TYPE_PACKED (variant) = TYPE_PACKED (rli->t);
1804
1805 /* Lay out any static members. This is done now because their type
1806 may use the record's type. */
1807 while (!VEC_empty (tree, rli->pending_statics))
1808 layout_decl (VEC_pop (tree, rli->pending_statics), 0);
1809
1810 /* Clean up. */
1811 if (free_p)
1812 {
1813 VEC_free (tree, gc, rli->pending_statics);
1814 free (rli);
1815 }
1816 }
1817 \f
1818
1819 /* Finish processing a builtin RECORD_TYPE type TYPE. Its name is
1820 NAME; its fields are chained in reverse order on FIELDS.
1821
1822 If ALIGN_TYPE is non-null, it is given the same alignment as
1823 ALIGN_TYPE. */
1824
1825 void
1826 finish_builtin_struct (tree type, const char *name, tree fields,
1827 tree align_type)
1828 {
1829 tree tail, next;
1830
1831 for (tail = NULL_TREE; fields; tail = fields, fields = next)
1832 {
1833 DECL_FIELD_CONTEXT (fields) = type;
1834 next = DECL_CHAIN (fields);
1835 DECL_CHAIN (fields) = tail;
1836 }
1837 TYPE_FIELDS (type) = tail;
1838
1839 if (align_type)
1840 {
1841 TYPE_ALIGN (type) = TYPE_ALIGN (align_type);
1842 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (align_type);
1843 }
1844
1845 layout_type (type);
1846 #if 0 /* not yet, should get fixed properly later */
1847 TYPE_NAME (type) = make_type_decl (get_identifier (name), type);
1848 #else
1849 TYPE_NAME (type) = build_decl (BUILTINS_LOCATION,
1850 TYPE_DECL, get_identifier (name), type);
1851 #endif
1852 TYPE_STUB_DECL (type) = TYPE_NAME (type);
1853 layout_decl (TYPE_NAME (type), 0);
1854 }
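/* Illustrative sketch, not part of the original source: how a caller might
   use finish_builtin_struct to lay out a small { ptr, len } record.  The
   field names and the helper below are invented for illustration; note that
   the fields are passed in reverse order, as the function's comment
   requires.  */
#if 0 /* example only */
static tree
build_example_record (void)
{
  tree f_ptr = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                           get_identifier ("ptr"), ptr_type_node);
  tree f_len = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                           get_identifier ("len"), long_integer_type_node);
  tree rec = make_node (RECORD_TYPE);

  /* Chain the last field first; finish_builtin_struct reverses the chain,
     so the final layout order is ptr, then len.  */
  DECL_CHAIN (f_len) = f_ptr;
  finish_builtin_struct (rec, "__example_record", f_len, NULL_TREE);
  return rec;
}
#endif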
1855
1856 /* Calculate the mode, size, and alignment for TYPE.
1857 For an array type, calculate the element separation as well.
1858 Record TYPE on the chain of permanent or temporary types
1859 so that dbxout will find out about it.
1860
1861 TYPE_SIZE of a type is nonzero if the type has been laid out already.
1862 layout_type does nothing on such a type.
1863
1864 If the type is incomplete, its TYPE_SIZE remains zero. */
1865
1866 void
1867 layout_type (tree type)
1868 {
1869 gcc_assert (type);
1870
1871 if (type == error_mark_node)
1872 return;
1873
1874 /* Do nothing if type has been laid out before. */
1875 if (TYPE_SIZE (type))
1876 return;
1877
1878 switch (TREE_CODE (type))
1879 {
1880 case LANG_TYPE:
1881 /* This kind of type is the responsibility
1882 of the language-specific code. */
1883 gcc_unreachable ();
1884
1885 case BOOLEAN_TYPE: /* Used for Java, Pascal, and Chill. */
1886 if (TYPE_PRECISION (type) == 0)
1887 TYPE_PRECISION (type) = 1; /* default to one byte/boolean. */
1888
1889 /* ... fall through ... */
1890
1891 case INTEGER_TYPE:
1892 case ENUMERAL_TYPE:
1893 if (TREE_CODE (TYPE_MIN_VALUE (type)) == INTEGER_CST
1894 && tree_int_cst_sgn (TYPE_MIN_VALUE (type)) >= 0)
1895 TYPE_UNSIGNED (type) = 1;
1896
1897 SET_TYPE_MODE (type,
1898 smallest_mode_for_size (TYPE_PRECISION (type), MODE_INT));
1899 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
1900 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
1901 break;
1902
1903 case REAL_TYPE:
1904 SET_TYPE_MODE (type,
1905 mode_for_size (TYPE_PRECISION (type), MODE_FLOAT, 0));
1906 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
1907 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
1908 break;
1909
1910 case FIXED_POINT_TYPE:
1911 /* TYPE_MODE (type) has been set already. */
1912 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
1913 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
1914 break;
1915
1916 case COMPLEX_TYPE:
1917 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
1918 SET_TYPE_MODE (type,
1919 mode_for_size (2 * TYPE_PRECISION (TREE_TYPE (type)),
1920 (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE
1921 ? MODE_COMPLEX_FLOAT : MODE_COMPLEX_INT),
1922 0));
1923 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (TYPE_MODE (type)));
1924 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (TYPE_MODE (type)));
1925 break;
1926
1927 case VECTOR_TYPE:
1928 {
1929 int nunits = TYPE_VECTOR_SUBPARTS (type);
1930 tree innertype = TREE_TYPE (type);
1931
1932 gcc_assert (!(nunits & (nunits - 1)));
1933
1934 /* Find an appropriate mode for the vector type. */
1935 if (TYPE_MODE (type) == VOIDmode)
1936 SET_TYPE_MODE (type,
1937 mode_for_vector (TYPE_MODE (innertype), nunits));
1938
1939 TYPE_SATURATING (type) = TYPE_SATURATING (TREE_TYPE (type));
1940 TYPE_UNSIGNED (type) = TYPE_UNSIGNED (TREE_TYPE (type));
1941 TYPE_SIZE_UNIT (type) = int_const_binop (MULT_EXPR,
1942 TYPE_SIZE_UNIT (innertype),
1943 size_int (nunits));
1944 TYPE_SIZE (type) = int_const_binop (MULT_EXPR, TYPE_SIZE (innertype),
1945 bitsize_int (nunits));
1946
1947 /* Always naturally align vectors. This prevents ABI changes
1948 depending on whether or not native vector modes are supported. */
1949 TYPE_ALIGN (type) = tree_low_cst (TYPE_SIZE (type), 0);
1950 break;
1951 }
1952
1953 case VOID_TYPE:
1954 /* This is an incomplete type and so doesn't have a size. */
1955 TYPE_ALIGN (type) = 1;
1956 TYPE_USER_ALIGN (type) = 0;
1957 SET_TYPE_MODE (type, VOIDmode);
1958 break;
1959
1960 case OFFSET_TYPE:
1961 TYPE_SIZE (type) = bitsize_int (POINTER_SIZE);
1962 TYPE_SIZE_UNIT (type) = size_int (POINTER_SIZE / BITS_PER_UNIT);
1963 /* A pointer might be MODE_PARTIAL_INT,
1964 but ptrdiff_t must be integral. */
1965 SET_TYPE_MODE (type, mode_for_size (POINTER_SIZE, MODE_INT, 0));
1966 TYPE_PRECISION (type) = POINTER_SIZE;
1967 break;
1968
1969 case FUNCTION_TYPE:
1970 case METHOD_TYPE:
1971 /* It's hard to see what the mode and size of a function ought to
1972 be, but we do know the alignment is FUNCTION_BOUNDARY, so
1973 make it consistent with that. */
1974 SET_TYPE_MODE (type, mode_for_size (FUNCTION_BOUNDARY, MODE_INT, 0));
1975 TYPE_SIZE (type) = bitsize_int (FUNCTION_BOUNDARY);
1976 TYPE_SIZE_UNIT (type) = size_int (FUNCTION_BOUNDARY / BITS_PER_UNIT);
1977 break;
1978
1979 case POINTER_TYPE:
1980 case REFERENCE_TYPE:
1981 {
1982 enum machine_mode mode = TYPE_MODE (type);
1983 if (TREE_CODE (type) == REFERENCE_TYPE && reference_types_internal)
1984 {
1985 addr_space_t as = TYPE_ADDR_SPACE (TREE_TYPE (type));
1986 mode = targetm.addr_space.address_mode (as);
1987 }
1988
1989 TYPE_SIZE (type) = bitsize_int (GET_MODE_BITSIZE (mode));
1990 TYPE_SIZE_UNIT (type) = size_int (GET_MODE_SIZE (mode));
1991 TYPE_UNSIGNED (type) = 1;
1992 TYPE_PRECISION (type) = GET_MODE_BITSIZE (mode);
1993 }
1994 break;
1995
1996 case ARRAY_TYPE:
1997 {
1998 tree index = TYPE_DOMAIN (type);
1999 tree element = TREE_TYPE (type);
2000
2001 build_pointer_type (element);
2002
2003 /* We need to know both bounds in order to compute the size. */
2004 if (index && TYPE_MAX_VALUE (index) && TYPE_MIN_VALUE (index)
2005 && TYPE_SIZE (element))
2006 {
2007 tree ub = TYPE_MAX_VALUE (index);
2008 tree lb = TYPE_MIN_VALUE (index);
2009 tree element_size = TYPE_SIZE (element);
2010 tree length;
2011
2012 /* Make sure that an array of zero-sized elements is zero-sized
2013 regardless of its extent. */
2014 if (integer_zerop (element_size))
2015 length = size_zero_node;
2016
2017 /* The computation should happen in the original type so
2018 that (possible) negative values are handled appropriately. */
2019 else
2020 length
2021 = fold_convert (sizetype,
2022 fold_build2 (PLUS_EXPR, TREE_TYPE (lb),
2023 build_int_cst (TREE_TYPE (lb), 1),
2024 fold_build2 (MINUS_EXPR,
2025 TREE_TYPE (lb),
2026 ub, lb)));
2027
2028 TYPE_SIZE (type) = size_binop (MULT_EXPR, element_size,
2029 fold_convert (bitsizetype,
2030 length));
2031
2032 /* If we know the size of the element, calculate the total size
2033 directly rather than by the division below. This optimization
2034 helps Fortran assumed-size arrays (where the size of the array
2035 is determined at runtime) substantially. */
2036 if (TYPE_SIZE_UNIT (element))
2037 TYPE_SIZE_UNIT (type)
2038 = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (element), length);
2039 }
2040
2041 /* Now round the alignment and size,
2042 using machine-dependent criteria if any. */
2043
2044 #ifdef ROUND_TYPE_ALIGN
2045 TYPE_ALIGN (type)
2046 = ROUND_TYPE_ALIGN (type, TYPE_ALIGN (element), BITS_PER_UNIT);
2047 #else
2048 TYPE_ALIGN (type) = MAX (TYPE_ALIGN (element), BITS_PER_UNIT);
2049 #endif
2050 TYPE_USER_ALIGN (type) = TYPE_USER_ALIGN (element);
2051 SET_TYPE_MODE (type, BLKmode);
2052 if (TYPE_SIZE (type) != 0
2053 #ifdef MEMBER_TYPE_FORCES_BLK
2054 && ! MEMBER_TYPE_FORCES_BLK (type, VOIDmode)
2055 #endif
2056 /* BLKmode elements force a BLKmode aggregate; otherwise
2057 extracting and storing fields may not work correctly. */
2058 && (TYPE_MODE (TREE_TYPE (type)) != BLKmode
2059 || TYPE_NO_FORCE_BLK (TREE_TYPE (type))))
2060 {
2061 SET_TYPE_MODE (type, mode_for_array (TREE_TYPE (type),
2062 TYPE_SIZE (type)));
2063 if (TYPE_MODE (type) != BLKmode
2064 && STRICT_ALIGNMENT && TYPE_ALIGN (type) < BIGGEST_ALIGNMENT
2065 && TYPE_ALIGN (type) < GET_MODE_ALIGNMENT (TYPE_MODE (type)))
2066 {
2067 TYPE_NO_FORCE_BLK (type) = 1;
2068 SET_TYPE_MODE (type, BLKmode);
2069 }
2070 }
2071 /* When the element size is constant, check that it is at least as
2072 large as the element alignment. */
2073 if (TYPE_SIZE_UNIT (element)
2074 && TREE_CODE (TYPE_SIZE_UNIT (element)) == INTEGER_CST
2075 /* If TYPE_SIZE_UNIT overflowed, then it is certainly larger than
2076 TYPE_ALIGN_UNIT. */
2077 && !TREE_OVERFLOW (TYPE_SIZE_UNIT (element))
2078 && !integer_zerop (TYPE_SIZE_UNIT (element))
2079 && compare_tree_int (TYPE_SIZE_UNIT (element),
2080 TYPE_ALIGN_UNIT (element)) < 0)
2081 error ("alignment of array elements is greater than element size");
2082 break;
2083 }
2084
2085 case RECORD_TYPE:
2086 case UNION_TYPE:
2087 case QUAL_UNION_TYPE:
2088 {
2089 tree field;
2090 record_layout_info rli;
2091
2092 /* Initialize the layout information. */
2093 rli = start_record_layout (type);
2094
2095 /* If this is a QUAL_UNION_TYPE, we want to process the fields
2096 in the reverse order in building the COND_EXPR that denotes
2097 its size. We reverse them again later. */
2098 if (TREE_CODE (type) == QUAL_UNION_TYPE)
2099 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2100
2101 /* Place all the fields. */
2102 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
2103 place_field (rli, field);
2104
2105 if (TREE_CODE (type) == QUAL_UNION_TYPE)
2106 TYPE_FIELDS (type) = nreverse (TYPE_FIELDS (type));
2107
2108 /* Finish laying out the record. */
2109 finish_record_layout (rli, /*free_p=*/true);
2110 }
2111 break;
2112
2113 default:
2114 gcc_unreachable ();
2115 }
2116
2117 /* Compute the final TYPE_SIZE, TYPE_ALIGN, etc. for TYPE. For
2118 records and unions, finish_record_layout already called this
2119 function. */
2120 if (TREE_CODE (type) != RECORD_TYPE
2121 && TREE_CODE (type) != UNION_TYPE
2122 && TREE_CODE (type) != QUAL_UNION_TYPE)
2123 finalize_type_size (type);
2124
2125 /* We should never see alias sets on incomplete aggregates, nor should
2126 layout_type be called on aggregates that are already complete. */
2127 if (AGGREGATE_TYPE_P (type))
2128 gcc_assert (!TYPE_ALIAS_SET_KNOWN_P (type));
2129 }
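/* Illustrative sketch, not part of the original source: a worked instance of
   the ARRAY_TYPE computation above, assuming a target with 32-bit int and
   8-bit units.  For

       int a[10];

   the index domain is [0, 9], so

       length          = (9 - 0) + 1        = 10
       TYPE_SIZE       = 10 * 32 bits       = 320
       TYPE_SIZE_UNIT  = 10 * 4 bytes       = 40

   with TYPE_ALIGN taken from the element type (32 bits here) and the mode
   chosen by mode_for_array from the element mode and total size.  */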
2130
2131 /* Vector types need to re-check the target flags each time we report
2132 the machine mode. We need to do this because attribute target can
2133 change the result of vector_mode_supported_p and have_regs_of_mode
2134 on a per-function basis. Thus the TYPE_MODE of a VECTOR_TYPE can
2135 change on a per-function basis. */
2136 /* ??? Possibly a better solution is to run through all the types
2137 referenced by a function and re-compute the TYPE_MODE once, rather
2138 than make the TYPE_MODE macro call a function. */
2139
2140 enum machine_mode
2141 vector_type_mode (const_tree t)
2142 {
2143 enum machine_mode mode;
2144
2145 gcc_assert (TREE_CODE (t) == VECTOR_TYPE);
2146
2147 mode = t->type.mode;
2148 if (VECTOR_MODE_P (mode)
2149 && (!targetm.vector_mode_supported_p (mode)
2150 || !have_regs_of_mode[mode]))
2151 {
2152 enum machine_mode innermode = TREE_TYPE (t)->type.mode;
2153
2154 /* For integers, try mapping it to a same-sized scalar mode. */
2155 if (GET_MODE_CLASS (innermode) == MODE_INT)
2156 {
2157 mode = mode_for_size (TYPE_VECTOR_SUBPARTS (t)
2158 * GET_MODE_BITSIZE (innermode), MODE_INT, 0);
2159
2160 if (mode != VOIDmode && have_regs_of_mode[mode])
2161 return mode;
2162 }
2163
2164 return BLKmode;
2165 }
2166
2167 return mode;
2168 }
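/* Illustrative sketch, not part of the original source: suppose T is a
   vector of four 32-bit integers but the current function's target flags do
   not enable a suitable vector unit.  The code above then looks for a
   128-bit integer mode; if TImode exists and has registers available it is
   returned in place of the vector mode, otherwise the type is reported as
   BLKmode.  */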
2169 \f
2170 /* Create and return a type for signed integers of PRECISION bits. */
2171
2172 tree
2173 make_signed_type (int precision)
2174 {
2175 tree type = make_node (INTEGER_TYPE);
2176
2177 TYPE_PRECISION (type) = precision;
2178
2179 fixup_signed_type (type);
2180 return type;
2181 }
2182
2183 /* Create and return a type for unsigned integers of PRECISION bits. */
2184
2185 tree
2186 make_unsigned_type (int precision)
2187 {
2188 tree type = make_node (INTEGER_TYPE);
2189
2190 TYPE_PRECISION (type) = precision;
2191
2192 fixup_unsigned_type (type);
2193 return type;
2194 }
2195 \f
2196 /* Create and return a type for fract of PRECISION bits, UNSIGNEDP,
2197 and SATP. */
2198
2199 tree
2200 make_fract_type (int precision, int unsignedp, int satp)
2201 {
2202 tree type = make_node (FIXED_POINT_TYPE);
2203
2204 TYPE_PRECISION (type) = precision;
2205
2206 if (satp)
2207 TYPE_SATURATING (type) = 1;
2208
2209 /* Lay out the type: set its alignment, size, etc. */
2210 if (unsignedp)
2211 {
2212 TYPE_UNSIGNED (type) = 1;
2213 SET_TYPE_MODE (type, mode_for_size (precision, MODE_UFRACT, 0));
2214 }
2215 else
2216 SET_TYPE_MODE (type, mode_for_size (precision, MODE_FRACT, 0));
2217 layout_type (type);
2218
2219 return type;
2220 }
2221
2222 /* Create and return a type for accum of PRECISION bits, UNSIGNEDP,
2223 and SATP. */
2224
2225 tree
2226 make_accum_type (int precision, int unsignedp, int satp)
2227 {
2228 tree type = make_node (FIXED_POINT_TYPE);
2229
2230 TYPE_PRECISION (type) = precision;
2231
2232 if (satp)
2233 TYPE_SATURATING (type) = 1;
2234
2235 /* Lay out the type: set its alignment, size, etc. */
2236 if (unsignedp)
2237 {
2238 TYPE_UNSIGNED (type) = 1;
2239 SET_TYPE_MODE (type, mode_for_size (precision, MODE_UACCUM, 0));
2240 }
2241 else
2242 SET_TYPE_MODE (type, mode_for_size (precision, MODE_ACCUM, 0));
2243 layout_type (type);
2244
2245 return type;
2246 }
2247
2248 /* Initialize sizetype and bitsizetype to a reasonable and temporary
2249 value to enable integer types to be created. */
2250
2251 void
2252 initialize_sizetypes (void)
2253 {
2254 tree t = make_node (INTEGER_TYPE);
2255 int precision = GET_MODE_BITSIZE (SImode);
2256
2257 SET_TYPE_MODE (t, SImode);
2258 TYPE_ALIGN (t) = GET_MODE_ALIGNMENT (SImode);
2259 TYPE_IS_SIZETYPE (t) = 1;
2260 TYPE_UNSIGNED (t) = 1;
2261 TYPE_SIZE (t) = build_int_cst (t, precision);
2262 TYPE_SIZE_UNIT (t) = build_int_cst (t, GET_MODE_SIZE (SImode));
2263 TYPE_PRECISION (t) = precision;
2264
2265 set_min_and_max_values_for_integral_type (t, precision,
2266 /*is_unsigned=*/true);
2267
2268 sizetype = t;
2269 bitsizetype = build_distinct_type_copy (t);
2270 }
2271
2272 /* Make sizetype a version of TYPE, and initialize *sizetype accordingly.
2273 We do this by overwriting the stub sizetype and bitsizetype nodes created
2274 by initialize_sizetypes. This makes sure that (a) anything stubby about
2275 them no longer exists and (b) any INTEGER_CSTs created with such a type
2276 remain valid. */
2277
2278 void
2279 set_sizetype (tree type)
2280 {
2281 tree t, max;
2282 int oprecision = TYPE_PRECISION (type);
2283 /* The *bitsizetype types use a precision that avoids overflows when
2284 calculating signed sizes / offsets in bits. However, when
2285 cross-compiling from a 32-bit to a 64-bit host, we are limited to 64-bit
2286 precision. */
2287 int precision
2288 = MIN (oprecision + BITS_PER_UNIT_LOG + 1, MAX_FIXED_MODE_SIZE);
2289 precision
2290 = GET_MODE_PRECISION (smallest_mode_for_size (precision, MODE_INT));
2291 if (precision > HOST_BITS_PER_WIDE_INT * 2)
2292 precision = HOST_BITS_PER_WIDE_INT * 2;
2293
2294 /* sizetype must be an unsigned type. */
2295 gcc_assert (TYPE_UNSIGNED (type));
2296
2297 t = build_distinct_type_copy (type);
2298 /* We want to use sizetype's cache, as we will be replacing that type. */
2299 TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (sizetype);
2300 TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (sizetype);
2301 TYPE_UID (t) = TYPE_UID (sizetype);
2302 TYPE_IS_SIZETYPE (t) = 1;
2303
2304 /* Replace our original stub sizetype. */
2305 memcpy (sizetype, t, tree_size (sizetype));
2306 TYPE_MAIN_VARIANT (sizetype) = sizetype;
2307 TYPE_CANONICAL (sizetype) = sizetype;
2308
2309 /* sizetype is unsigned but we need to fix TYPE_MAX_VALUE so that it is
2310 sign-extended in a way consistent with force_fit_type. */
2311 max = TYPE_MAX_VALUE (sizetype);
2312 TYPE_MAX_VALUE (sizetype)
2313 = double_int_to_tree (sizetype, tree_to_double_int (max));
2314
2315 t = make_node (INTEGER_TYPE);
2316 TYPE_NAME (t) = get_identifier ("bit_size_type");
2317 /* We want to use bitsizetype's cache, as we will be replacing that type. */
2318 TYPE_CACHED_VALUES (t) = TYPE_CACHED_VALUES (bitsizetype);
2319 TYPE_CACHED_VALUES_P (t) = TYPE_CACHED_VALUES_P (bitsizetype);
2320 TYPE_PRECISION (t) = precision;
2321 TYPE_UID (t) = TYPE_UID (bitsizetype);
2322 TYPE_IS_SIZETYPE (t) = 1;
2323
2324 /* Replace our original stub bitsizetype. */
2325 memcpy (bitsizetype, t, tree_size (bitsizetype));
2326 TYPE_MAIN_VARIANT (bitsizetype) = bitsizetype;
2327 TYPE_CANONICAL (bitsizetype) = bitsizetype;
2328
2329 fixup_unsigned_type (bitsizetype);
2330
2331 /* Create the signed variants of *sizetype. */
2332 ssizetype = make_signed_type (oprecision);
2333 TYPE_IS_SIZETYPE (ssizetype) = 1;
2334 sbitsizetype = make_signed_type (precision);
2335 TYPE_IS_SIZETYPE (sbitsizetype) = 1;
2336 }
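/* Illustrative sketch, not part of the original source: a worked instance of
   the precision computation above for a typical ILP32 target, where the
   front end passes a 32-bit unsigned type.  With BITS_PER_UNIT_LOG == 3,

       oprecision = 32
       precision  = MIN (32 + 3 + 1, MAX_FIXED_MODE_SIZE)    e.g. 36
       precision  = GET_MODE_PRECISION (DImode)              = 64

   so sizetype stays 32 bits wide while bitsizetype gets 64 bits, enough to
   express any object size in bits without overflow.  */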
2337 \f
2338 /* TYPE is an integral type, i.e., an INTEGER_TYPE, ENUMERAL_TYPE
2339 or BOOLEAN_TYPE. Set TYPE_MIN_VALUE and TYPE_MAX_VALUE
2340 for TYPE, based on the PRECISION and whether or not the TYPE
2341 IS_UNSIGNED. PRECISION need not correspond to a width supported
2342 natively by the hardware; for example, on a machine with 8-bit,
2343 16-bit, and 32-bit register modes, PRECISION might be 7, 23, or
2344 61. */
2345
2346 void
2347 set_min_and_max_values_for_integral_type (tree type,
2348 int precision,
2349 bool is_unsigned)
2350 {
2351 tree min_value;
2352 tree max_value;
2353
2354 if (is_unsigned)
2355 {
2356 min_value = build_int_cst (type, 0);
2357 max_value
2358 = build_int_cst_wide (type, precision - HOST_BITS_PER_WIDE_INT >= 0
2359 ? -1
2360 : ((HOST_WIDE_INT) 1 << precision) - 1,
2361 precision - HOST_BITS_PER_WIDE_INT > 0
2362 ? ((unsigned HOST_WIDE_INT) ~0
2363 >> (HOST_BITS_PER_WIDE_INT
2364 - (precision - HOST_BITS_PER_WIDE_INT)))
2365 : 0);
2366 }
2367 else
2368 {
2369 min_value
2370 = build_int_cst_wide (type,
2371 (precision - HOST_BITS_PER_WIDE_INT > 0
2372 ? 0
2373 : (HOST_WIDE_INT) (-1) << (precision - 1)),
2374 (((HOST_WIDE_INT) (-1)
2375 << (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
2376 ? precision - HOST_BITS_PER_WIDE_INT - 1
2377 : 0))));
2378 max_value
2379 = build_int_cst_wide (type,
2380 (precision - HOST_BITS_PER_WIDE_INT > 0
2381 ? -1
2382 : ((HOST_WIDE_INT) 1 << (precision - 1)) - 1),
2383 (precision - HOST_BITS_PER_WIDE_INT - 1 > 0
2384 ? (((HOST_WIDE_INT) 1
2385 << (precision - HOST_BITS_PER_WIDE_INT - 1))) - 1
2386 : 0));
2387 }
2388
2389 TYPE_MIN_VALUE (type) = min_value;
2390 TYPE_MAX_VALUE (type) = max_value;
2391 }
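/* Illustrative sketch, not part of the original source: for the non-native
   precisions mentioned in the comment above, e.g. PRECISION == 7,

       unsigned:  min = 0,          max = 2**7 - 1 = 127
       signed:    min = -(2**6) = -64,  max = 2**6 - 1 = 63

   both computed by the HOST_WIDE_INT shifts above when the precision fits
   in a single host word.  */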
2392
2393 /* Set the extreme values of TYPE based on its precision in bits,
2394 then lay it out. Used when make_signed_type won't do
2395 because the tree code is not INTEGER_TYPE.
2396 E.g. for Pascal, when the -fsigned-char option is given. */
2397
2398 void
2399 fixup_signed_type (tree type)
2400 {
2401 int precision = TYPE_PRECISION (type);
2402
2403 /* We cannot properly represent constants wider than
2404 2 * HOST_BITS_PER_WIDE_INT bits, but we still need the types,
2405 as they are used by the i386 vector extensions and friends. */
2406 if (precision > HOST_BITS_PER_WIDE_INT * 2)
2407 precision = HOST_BITS_PER_WIDE_INT * 2;
2408
2409 set_min_and_max_values_for_integral_type (type, precision,
2410 /*is_unsigned=*/false);
2411
2412 /* Lay out the type: set its alignment, size, etc. */
2413 layout_type (type);
2414 }
2415
2416 /* Set the extreme values of TYPE based on its precision in bits,
2417 then lay it out. This is used both in `make_unsigned_type'
2418 and for enumeral types. */
2419
2420 void
2421 fixup_unsigned_type (tree type)
2422 {
2423 int precision = TYPE_PRECISION (type);
2424
2425 /* We cannot properly represent constants wider than
2426 2 * HOST_BITS_PER_WIDE_INT bits, but we still need the types,
2427 as they are used by the i386 vector extensions and friends. */
2428 if (precision > HOST_BITS_PER_WIDE_INT * 2)
2429 precision = HOST_BITS_PER_WIDE_INT * 2;
2430
2431 TYPE_UNSIGNED (type) = 1;
2432
2433 set_min_and_max_values_for_integral_type (type, precision,
2434 /*is_unsigned=*/true);
2435
2436 /* Lay out the type: set its alignment, size, etc. */
2437 layout_type (type);
2438 }
2439 \f
2440 /* Find the best machine mode to use when referencing a bit field of length
2441 BITSIZE bits starting at BITPOS.
2442
2443 The underlying object is known to be aligned to a boundary of ALIGN bits.
2444 If LARGEST_MODE is not VOIDmode, it means that we should not use a mode
2445 larger than LARGEST_MODE (usually SImode).
2446
2447 If no mode meets all these conditions, we return VOIDmode.
2448
2449 If VOLATILEP is false and SLOW_BYTE_ACCESS is false, we return the
2450 smallest mode meeting these conditions.
2451
2452 If VOLATILEP is false and SLOW_BYTE_ACCESS is true, we return the
2453 largest mode (but a mode no wider than UNITS_PER_WORD) that meets
2454 all the conditions.
2455
2456 If VOLATILEP is true, the narrow_volatile_bitfield target hook is used to
2457 decide which of the above modes should be used. */
2458
2459 enum machine_mode
2460 get_best_mode (int bitsize, int bitpos, unsigned int align,
2461 enum machine_mode largest_mode, int volatilep)
2462 {
2463 enum machine_mode mode;
2464 unsigned int unit = 0;
2465
2466 /* Find the narrowest integer mode that contains the bit field. */
2467 for (mode = GET_CLASS_NARROWEST_MODE (MODE_INT); mode != VOIDmode;
2468 mode = GET_MODE_WIDER_MODE (mode))
2469 {
2470 unit = GET_MODE_BITSIZE (mode);
2471 if ((bitpos % unit) + bitsize <= unit)
2472 break;
2473 }
2474
2475 if (mode == VOIDmode
2476 /* It is tempting to omit the following line
2477 if STRICT_ALIGNMENT is true.
2478 But that is incorrect, since if the bitfield uses part of 3 bytes
2479 and we use a 4-byte mode, we could get a spurious segv
2480 if the extra 4th byte is past the end of memory.
2481 (Though at least one Unix compiler ignores this problem:
2482 the one on the Sequent 386 machine.)
2483 || MIN (unit, BIGGEST_ALIGNMENT) > align
2484 || (largest_mode != VOIDmode && unit > GET_MODE_BITSIZE (largest_mode)))
2485 return VOIDmode;
2486
2487 if ((SLOW_BYTE_ACCESS && ! volatilep)
2488 || (volatilep && !targetm.narrow_volatile_bitfield ()))
2489 {
2490 enum machine_mode wide_mode = VOIDmode, tmode;
2491
2492 for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT); tmode != VOIDmode;
2493 tmode = GET_MODE_WIDER_MODE (tmode))
2494 {
2495 unit = GET_MODE_BITSIZE (tmode);
2496 if (bitpos / unit == (bitpos + bitsize - 1) / unit
2497 && unit <= BITS_PER_WORD
2498 && unit <= MIN (align, BIGGEST_ALIGNMENT)
2499 && (largest_mode == VOIDmode
2500 || unit <= GET_MODE_BITSIZE (largest_mode)))
2501 wide_mode = tmode;
2502 }
2503
2504 if (wide_mode != VOIDmode)
2505 return wide_mode;
2506 }
2507
2508 return mode;
2509 }
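/* Illustrative sketch, not part of the original source: a worked call of the
   function above on a 32-bit target with BITS_PER_WORD == 32.  */
#if 0 /* example only */
  /* A 9-bit field starting at bit 3 of an object aligned to 32 bits: the
     narrowest integer mode containing bits 3..11 is HImode, and with fast
     byte access on a non-volatile field that is what is returned.  */
  enum machine_mode m1 = get_best_mode (9, 3, 32, VOIDmode, 0);

  /* With SLOW_BYTE_ACCESS, or for a volatile field on a target that does
     not narrow volatile bit-fields, the second loop above instead prefers
     the widest suitable mode up to a word, here SImode.  */
#endif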
2510
2511 /* Get the minimal and maximal values for MODE (signed or unsigned depending
2512 on SIGN). The returned constants are made to be usable in TARGET_MODE. */
2513
2514 void
2515 get_mode_bounds (enum machine_mode mode, int sign,
2516 enum machine_mode target_mode,
2517 rtx *mmin, rtx *mmax)
2518 {
2519 unsigned size = GET_MODE_BITSIZE (mode);
2520 unsigned HOST_WIDE_INT min_val, max_val;
2521
2522 gcc_assert (size <= HOST_BITS_PER_WIDE_INT);
2523
2524 if (sign)
2525 {
2526 min_val = -((unsigned HOST_WIDE_INT) 1 << (size - 1));
2527 max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1)) - 1;
2528 }
2529 else
2530 {
2531 min_val = 0;
2532 max_val = ((unsigned HOST_WIDE_INT) 1 << (size - 1) << 1) - 1;
2533 }
2534
2535 *mmin = gen_int_mode (min_val, target_mode);
2536 *mmax = gen_int_mode (max_val, target_mode);
2537 }
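/* Illustrative sketch, not part of the original source: for QImode (8 bits)
   the function above yields [-128, 127] when SIGN is nonzero and [0, 255]
   otherwise, each bound returned as a CONST_INT valid in TARGET_MODE.  */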
2538
2539 #include "gt-stor-layout.h"