[62/77] Big machine_mode to scalar_int_mode replacement
[gcc.git] / gcc / explow.c
1 /* Subroutines for manipulating rtx's in semantically interesting ways.
2 Copyright (C) 1987-2017 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it under
7 the terms of the GNU General Public License as published by the Free
8 Software Foundation; either version 3, or (at your option) any later
9 version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
20
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "target.h"
25 #include "function.h"
26 #include "rtl.h"
27 #include "tree.h"
28 #include "memmodel.h"
29 #include "tm_p.h"
30 #include "expmed.h"
31 #include "profile-count.h"
32 #include "optabs.h"
33 #include "emit-rtl.h"
34 #include "recog.h"
35 #include "diagnostic-core.h"
36 #include "stor-layout.h"
37 #include "except.h"
38 #include "dojump.h"
39 #include "explow.h"
40 #include "expr.h"
41 #include "common/common-target.h"
42 #include "output.h"
43
44 static rtx break_out_memory_refs (rtx);
45
46
47 /* Truncate and perhaps sign-extend C as appropriate for MODE. */
48
49 HOST_WIDE_INT
50 trunc_int_for_mode (HOST_WIDE_INT c, machine_mode mode)
51 {
52 int width = GET_MODE_PRECISION (mode);
53
54 /* You want to truncate to a _what_? */
55 gcc_assert (SCALAR_INT_MODE_P (mode)
56 || POINTER_BOUNDS_MODE_P (mode));
57
58 /* Canonicalize BImode to 0 and STORE_FLAG_VALUE. */
59 if (mode == BImode)
60 return c & 1 ? STORE_FLAG_VALUE : 0;
61
62 /* Sign-extend for the requested mode. */
63
64 if (width < HOST_BITS_PER_WIDE_INT)
65 {
66 HOST_WIDE_INT sign = 1;
67 sign <<= width - 1;
68 c &= (sign << 1) - 1;
69 c ^= sign;
70 c -= sign;
71 }
72
73 return c;
74 }
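/* Illustrative sketch, not part of the original source: with QImode
   (8-bit precision) and HOST_BITS_PER_WIDE_INT wider than 8,

     trunc_int_for_mode (0x1ff, QImode)  ==  -1
     trunc_int_for_mode (0x07f, QImode)  ==  127

   since the value is first masked to the mode's 8-bit width and then
   sign-extended from bit 7.  */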
75
76 /* Return an rtx for the sum of X and the integer C, given that X has
77 mode MODE. INPLACE is true if X can be modified in place or false
78 if it must be treated as immutable. */
79
80 rtx
81 plus_constant (machine_mode mode, rtx x, HOST_WIDE_INT c,
82 bool inplace)
83 {
84 RTX_CODE code;
85 rtx y;
86 rtx tem;
87 int all_constant = 0;
88
89 gcc_assert (GET_MODE (x) == VOIDmode || GET_MODE (x) == mode);
90
91 if (c == 0)
92 return x;
93
94 restart:
95
96 code = GET_CODE (x);
97 y = x;
98
99 switch (code)
100 {
101 CASE_CONST_SCALAR_INT:
102 return immed_wide_int_const (wi::add (rtx_mode_t (x, mode), c), mode);
103 case MEM:
104 /* If this is a reference to the constant pool, try replacing it with
105 a reference to a new constant. If the resulting address isn't
106 valid, don't return it because we have no way to validize it. */
107 if (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
108 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)))
109 {
110 rtx cst = get_pool_constant (XEXP (x, 0));
111
112 if (GET_CODE (cst) == CONST_VECTOR
113 && GET_MODE_INNER (GET_MODE (cst)) == mode)
114 {
115 cst = gen_lowpart (mode, cst);
116 gcc_assert (cst);
117 }
118 if (GET_MODE (cst) == VOIDmode || GET_MODE (cst) == mode)
119 {
120 tem = plus_constant (mode, cst, c);
121 tem = force_const_mem (GET_MODE (x), tem);
122 /* Targets may disallow some constants in the constant pool, thus
123 force_const_mem may return NULL_RTX. */
124 if (tem && memory_address_p (GET_MODE (tem), XEXP (tem, 0)))
125 return tem;
126 }
127 }
128 break;
129
130 case CONST:
131 /* If adding to something entirely constant, set a flag
132 so that we can add a CONST around the result. */
133 if (inplace && shared_const_p (x))
134 inplace = false;
135 x = XEXP (x, 0);
136 all_constant = 1;
137 goto restart;
138
139 case SYMBOL_REF:
140 case LABEL_REF:
141 all_constant = 1;
142 break;
143
144 case PLUS:
145 /* The interesting case is adding the integer to a sum. Look
146 for constant term in the sum and combine with C. For an
147 integer constant term or a constant term that is not an
148 explicit integer, we combine or group them together anyway.
149
150 We may not immediately return from the recursive call here, lest
151 all_constant get lost. */
152
153 if (CONSTANT_P (XEXP (x, 1)))
154 {
155 rtx term = plus_constant (mode, XEXP (x, 1), c, inplace);
156 if (term == const0_rtx)
157 x = XEXP (x, 0);
158 else if (inplace)
159 XEXP (x, 1) = term;
160 else
161 x = gen_rtx_PLUS (mode, XEXP (x, 0), term);
162 c = 0;
163 }
164 else if (rtx *const_loc = find_constant_term_loc (&y))
165 {
166 if (!inplace)
167 {
168 /* We need to be careful since X may be shared and we can't
169 modify it in place. */
170 x = copy_rtx (x);
171 const_loc = find_constant_term_loc (&x);
172 }
173 *const_loc = plus_constant (mode, *const_loc, c, true);
174 c = 0;
175 }
176 break;
177
178 default:
179 break;
180 }
181
182 if (c != 0)
183 x = gen_rtx_PLUS (mode, x, gen_int_mode (c, mode));
184
185 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
186 return x;
187 else if (all_constant)
188 return gen_rtx_CONST (mode, x);
189 else
190 return x;
191 }
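/* Illustrative sketch, not part of the original source.  Assuming a
   pseudo REG, SImode, and INPLACE left false,

     plus_constant (SImode, gen_rtx_PLUS (SImode, reg, GEN_INT (4)), 8)

   folds the two constants and yields (plus:SI (reg) (const_int 12)),
   while

     plus_constant (SImode, symbol, 4)

   takes the SYMBOL_REF path and wraps the sum in a CONST, yielding
   (const:SI (plus:SI (symbol_ref) (const_int 4))).  */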
192 \f
193 /* If X is a sum, return a new sum like X but lacking any constant terms.
194 Add all the removed constant terms into *CONSTPTR.
195 X itself is not altered. The result != X if and only if
196 it is not isomorphic to X. */
197
198 rtx
199 eliminate_constant_term (rtx x, rtx *constptr)
200 {
201 rtx x0, x1;
202 rtx tem;
203
204 if (GET_CODE (x) != PLUS)
205 return x;
206
207 /* First handle constants appearing at this level explicitly. */
208 if (CONST_INT_P (XEXP (x, 1))
209 && 0 != (tem = simplify_binary_operation (PLUS, GET_MODE (x), *constptr,
210 XEXP (x, 1)))
211 && CONST_INT_P (tem))
212 {
213 *constptr = tem;
214 return eliminate_constant_term (XEXP (x, 0), constptr);
215 }
216
217 tem = const0_rtx;
218 x0 = eliminate_constant_term (XEXP (x, 0), &tem);
219 x1 = eliminate_constant_term (XEXP (x, 1), &tem);
220 if ((x1 != XEXP (x, 1) || x0 != XEXP (x, 0))
221 && 0 != (tem = simplify_binary_operation (PLUS, GET_MODE (x),
222 *constptr, tem))
223 && CONST_INT_P (tem))
224 {
225 *constptr = tem;
226 return gen_rtx_PLUS (GET_MODE (x), x0, x1);
227 }
228
229 return x;
230 }
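/* Illustrative sketch, not part of the original source.  Given
   X = (plus:SI (plus:SI (reg) (const_int 8)) (const_int 4)) and
   *CONSTPTR initially const0_rtx, the recursion above strips both
   constants: the function returns (reg) and leaves
   *CONSTPTR = (const_int 12).  */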
231
232 \f
233 /* Return a copy of X in which all memory references
234 and all constants that involve symbol refs
235 have been replaced with new temporary registers.
236 Also emit code to load the memory locations and constants
237 into those registers.
238
239 If X contains no such constants or memory references,
240 X itself (not a copy) is returned.
241
242 If a constant is found in the address that is not a legitimate constant
243 in an insn, it is left alone in the hope that it might be valid in the
244 address.
245
246 X may contain no arithmetic except addition, subtraction and multiplication.
247 Values returned by expand_expr with 1 for sum_ok fit this constraint. */
248
249 static rtx
250 break_out_memory_refs (rtx x)
251 {
252 if (MEM_P (x)
253 || (CONSTANT_P (x) && CONSTANT_ADDRESS_P (x)
254 && GET_MODE (x) != VOIDmode))
255 x = force_reg (GET_MODE (x), x);
256 else if (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
257 || GET_CODE (x) == MULT)
258 {
259 rtx op0 = break_out_memory_refs (XEXP (x, 0));
260 rtx op1 = break_out_memory_refs (XEXP (x, 1));
261
262 if (op0 != XEXP (x, 0) || op1 != XEXP (x, 1))
263 x = simplify_gen_binary (GET_CODE (x), GET_MODE (x), op0, op1);
264 }
265
266 return x;
267 }
268
269 /* Given X, a memory address in address space AS's pointer mode, convert it to
270 an address in the address space's address mode, or vice versa (TO_MODE says
271 which way). We take advantage of the fact that pointers are not allowed to
272 overflow by commuting arithmetic operations over conversions so that address
273 arithmetic insns can be used. IN_CONST is true if this conversion is inside
274 a CONST. NO_EMIT is true if no insns should be emitted, and instead
275 it should return NULL if it can't be simplified without emitting insns. */
276
277 rtx
278 convert_memory_address_addr_space_1 (scalar_int_mode to_mode ATTRIBUTE_UNUSED,
279 rtx x, addr_space_t as ATTRIBUTE_UNUSED,
280 bool in_const ATTRIBUTE_UNUSED,
281 bool no_emit ATTRIBUTE_UNUSED)
282 {
283 #ifndef POINTERS_EXTEND_UNSIGNED
284 gcc_assert (GET_MODE (x) == to_mode || GET_MODE (x) == VOIDmode);
285 return x;
286 #else /* defined(POINTERS_EXTEND_UNSIGNED) */
287 scalar_int_mode pointer_mode, address_mode, from_mode;
288 rtx temp;
289 enum rtx_code code;
290
291 /* If X already has the right mode, just return it. */
292 if (GET_MODE (x) == to_mode)
293 return x;
294
295 pointer_mode = targetm.addr_space.pointer_mode (as);
296 address_mode = targetm.addr_space.address_mode (as);
297 from_mode = to_mode == pointer_mode ? address_mode : pointer_mode;
298
299 /* Here we handle some special cases. If none of them apply, fall through
300 to the default case. */
301 switch (GET_CODE (x))
302 {
303 CASE_CONST_SCALAR_INT:
304 if (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (from_mode))
305 code = TRUNCATE;
306 else if (POINTERS_EXTEND_UNSIGNED < 0)
307 break;
308 else if (POINTERS_EXTEND_UNSIGNED > 0)
309 code = ZERO_EXTEND;
310 else
311 code = SIGN_EXTEND;
312 temp = simplify_unary_operation (code, to_mode, x, from_mode);
313 if (temp)
314 return temp;
315 break;
316
317 case SUBREG:
318 if ((SUBREG_PROMOTED_VAR_P (x) || REG_POINTER (SUBREG_REG (x)))
319 && GET_MODE (SUBREG_REG (x)) == to_mode)
320 return SUBREG_REG (x);
321 break;
322
323 case LABEL_REF:
324 temp = gen_rtx_LABEL_REF (to_mode, label_ref_label (x));
325 LABEL_REF_NONLOCAL_P (temp) = LABEL_REF_NONLOCAL_P (x);
326 return temp;
327
328 case SYMBOL_REF:
329 temp = shallow_copy_rtx (x);
330 PUT_MODE (temp, to_mode);
331 return temp;
332
333 case CONST:
334 temp = convert_memory_address_addr_space_1 (to_mode, XEXP (x, 0), as,
335 true, no_emit);
336 return temp ? gen_rtx_CONST (to_mode, temp) : temp;
337
338 case PLUS:
339 case MULT:
340 /* For addition we can safely permute the conversion and addition
341 operation if one operand is a constant and converting the constant
342 does not change it or if one operand is a constant and we are
343 using a ptr_extend instruction (POINTERS_EXTEND_UNSIGNED < 0).
344 We can always safely permute them if we are making the address
345 narrower. Inside a CONST RTL, this is safe for both pointers
346 zero or sign extended as pointers cannot wrap. */
347 if (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (from_mode)
348 || (GET_CODE (x) == PLUS
349 && CONST_INT_P (XEXP (x, 1))
350 && ((in_const && POINTERS_EXTEND_UNSIGNED != 0)
351 || XEXP (x, 1) == convert_memory_address_addr_space_1
352 (to_mode, XEXP (x, 1), as, in_const,
353 no_emit)
354 || POINTERS_EXTEND_UNSIGNED < 0)))
355 {
356 temp = convert_memory_address_addr_space_1 (to_mode, XEXP (x, 0),
357 as, in_const, no_emit);
358 return (temp ? gen_rtx_fmt_ee (GET_CODE (x), to_mode,
359 temp, XEXP (x, 1))
360 : temp);
361 }
362 break;
363
364 default:
365 break;
366 }
367
368 if (no_emit)
369 return NULL_RTX;
370
371 return convert_modes (to_mode, from_mode,
372 x, POINTERS_EXTEND_UNSIGNED);
373 #endif /* defined(POINTERS_EXTEND_UNSIGNED) */
374 }
375
376 /* Given X, a memory address in address space AS's pointer mode, convert it to
377 an address in the address space's address mode, or vice versa (TO_MODE says
378 which way). We take advantage of the fact that pointers are not allowed to
379 overflow by commuting arithmetic operations over conversions so that address
380 arithmetic insns can be used. */
381
382 rtx
383 convert_memory_address_addr_space (scalar_int_mode to_mode, rtx x,
384 addr_space_t as)
385 {
386 return convert_memory_address_addr_space_1 (to_mode, x, as, false, false);
387 }
388 \f
389
390 /* Return something equivalent to X but valid as a memory address for something
391 of mode MODE in the named address space AS. When X is not itself valid,
392 this works by copying X or subexpressions of it into registers. */
393
394 rtx
395 memory_address_addr_space (machine_mode mode, rtx x, addr_space_t as)
396 {
397 rtx oldx = x;
398 scalar_int_mode address_mode = targetm.addr_space.address_mode (as);
399
400 x = convert_memory_address_addr_space (address_mode, x, as);
401
402 /* By passing constant addresses through registers
403 we get a chance to cse them. */
404 if (! cse_not_expected && CONSTANT_P (x) && CONSTANT_ADDRESS_P (x))
405 x = force_reg (address_mode, x);
406
407 /* We get better cse by rejecting indirect addressing at this stage.
408 Let the combiner create indirect addresses where appropriate.
409 For now, generate the code so that the subexpressions useful to share
410 are visible. But not if cse won't be done! */
411 else
412 {
413 if (! cse_not_expected && !REG_P (x))
414 x = break_out_memory_refs (x);
415
416 /* At this point, any valid address is accepted. */
417 if (memory_address_addr_space_p (mode, x, as))
418 goto done;
419
420 /* If it was valid before but breaking out memory refs invalidated it,
421 use it the old way. */
422 if (memory_address_addr_space_p (mode, oldx, as))
423 {
424 x = oldx;
425 goto done;
426 }
427
428 /* Perform machine-dependent transformations on X
429 in certain cases. This is not necessary since the code
430 below can handle all possible cases, but machine-dependent
431 transformations can make better code. */
432 {
433 rtx orig_x = x;
434 x = targetm.addr_space.legitimize_address (x, oldx, mode, as);
435 if (orig_x != x && memory_address_addr_space_p (mode, x, as))
436 goto done;
437 }
438
439 /* PLUS and MULT can appear in special ways
440 as the result of attempts to make an address usable for indexing.
441 Usually they are dealt with by calling force_operand, below.
442 But a sum containing constant terms is special
443 if removing them makes the sum a valid address:
444 then we generate that address in a register
445 and index off of it. We do this because it often makes
446 shorter code, and because the addresses thus generated
447 in registers often become common subexpressions. */
448 if (GET_CODE (x) == PLUS)
449 {
450 rtx constant_term = const0_rtx;
451 rtx y = eliminate_constant_term (x, &constant_term);
452 if (constant_term == const0_rtx
453 || ! memory_address_addr_space_p (mode, y, as))
454 x = force_operand (x, NULL_RTX);
455 else
456 {
457 y = gen_rtx_PLUS (GET_MODE (x), copy_to_reg (y), constant_term);
458 if (! memory_address_addr_space_p (mode, y, as))
459 x = force_operand (x, NULL_RTX);
460 else
461 x = y;
462 }
463 }
464
465 else if (GET_CODE (x) == MULT || GET_CODE (x) == MINUS)
466 x = force_operand (x, NULL_RTX);
467
468 /* If we have a register that's an invalid address,
469 it must be a hard reg of the wrong class. Copy it to a pseudo. */
470 else if (REG_P (x))
471 x = copy_to_reg (x);
472
473 /* Last resort: copy the value to a register, since
474 the register is a valid address. */
475 else
476 x = force_reg (address_mode, x);
477 }
478
479 done:
480
481 gcc_assert (memory_address_addr_space_p (mode, x, as));
482 /* If we didn't change the address, we are done. Otherwise, mark
483 a reg as a pointer if we have REG or REG + CONST_INT. */
484 if (oldx == x)
485 return x;
486 else if (REG_P (x))
487 mark_reg_pointer (x, BITS_PER_UNIT);
488 else if (GET_CODE (x) == PLUS
489 && REG_P (XEXP (x, 0))
490 && CONST_INT_P (XEXP (x, 1)))
491 mark_reg_pointer (XEXP (x, 0), BITS_PER_UNIT);
492
493 /* OLDX may have been the address on a temporary. Update the address
494 to indicate that X is now used. */
495 update_temp_slot_address (oldx, x);
496
497 return x;
498 }
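/* Illustrative sketch, not part of the original source.  On a
   hypothetical target that accepts (plus reg reg) and (plus reg const)
   addresses but not (plus (plus reg reg) const), passing
   X = (plus (plus r1 r2) (const_int 4)) reaches the PLUS case above:
   the constant term is split off, R1 + R2 is copied into a fresh
   pseudo R, and the returned address is (plus R (const_int 4)), with R
   marked as a pointer.  */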
499
500 /* Convert a mem ref into one with a valid memory address.
501 Pass through anything else unchanged. */
502
503 rtx
504 validize_mem (rtx ref)
505 {
506 if (!MEM_P (ref))
507 return ref;
508 ref = use_anchored_address (ref);
509 if (memory_address_addr_space_p (GET_MODE (ref), XEXP (ref, 0),
510 MEM_ADDR_SPACE (ref)))
511 return ref;
512
513 /* Don't alter REF itself, since that is probably a stack slot. */
514 return replace_equiv_address (ref, XEXP (ref, 0));
515 }
516
517 /* If X is a memory reference to a member of an object block, try rewriting
518 it to use an anchor instead. Return the new memory reference on success
519 and the old one on failure. */
520
521 rtx
522 use_anchored_address (rtx x)
523 {
524 rtx base;
525 HOST_WIDE_INT offset;
526 machine_mode mode;
527
528 if (!flag_section_anchors)
529 return x;
530
531 if (!MEM_P (x))
532 return x;
533
534 /* Split the address into a base and offset. */
535 base = XEXP (x, 0);
536 offset = 0;
537 if (GET_CODE (base) == CONST
538 && GET_CODE (XEXP (base, 0)) == PLUS
539 && CONST_INT_P (XEXP (XEXP (base, 0), 1)))
540 {
541 offset += INTVAL (XEXP (XEXP (base, 0), 1));
542 base = XEXP (XEXP (base, 0), 0);
543 }
544
545 /* Check whether BASE is suitable for anchors. */
546 if (GET_CODE (base) != SYMBOL_REF
547 || !SYMBOL_REF_HAS_BLOCK_INFO_P (base)
548 || SYMBOL_REF_ANCHOR_P (base)
549 || SYMBOL_REF_BLOCK (base) == NULL
550 || !targetm.use_anchors_for_symbol_p (base))
551 return x;
552
553 /* Decide where BASE is going to be. */
554 place_block_symbol (base);
555
556 /* Get the anchor we need to use. */
557 offset += SYMBOL_REF_BLOCK_OFFSET (base);
558 base = get_section_anchor (SYMBOL_REF_BLOCK (base), offset,
559 SYMBOL_REF_TLS_MODEL (base));
560
561 /* Work out the offset from the anchor. */
562 offset -= SYMBOL_REF_BLOCK_OFFSET (base);
563
564 /* If we're going to run a CSE pass, force the anchor into a register.
565 We will then be able to reuse registers for several accesses, if the
566 target costs say that that's worthwhile. */
567 mode = GET_MODE (base);
568 if (!cse_not_expected)
569 base = force_reg (mode, base);
570
571 return replace_equiv_address (x, plus_constant (mode, base, offset));
572 }
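/* Illustrative sketch, not part of the original source, using made-up
   offsets.  For X = (mem (const (plus (symbol_ref "v") (const_int 12))))
   where "v" sits 40 bytes into its object block and the chosen anchor
   sits at block offset 0, the address is rewritten to
   (plus ANCHOR (const_int 52)); when CSE is still expected, the anchor
   is first forced into a register so several such accesses can share
   it.  */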
573 \f
574 /* Copy the value or contents of X to a new temp reg and return that reg. */
575
576 rtx
577 copy_to_reg (rtx x)
578 {
579 rtx temp = gen_reg_rtx (GET_MODE (x));
580
581 /* If not an operand, must be an address with PLUS and MULT so
582 do the computation. */
583 if (! general_operand (x, VOIDmode))
584 x = force_operand (x, temp);
585
586 if (x != temp)
587 emit_move_insn (temp, x);
588
589 return temp;
590 }
591
592 /* Like copy_to_reg but always give the new register mode Pmode
593 in case X is a constant. */
594
595 rtx
596 copy_addr_to_reg (rtx x)
597 {
598 return copy_to_mode_reg (Pmode, x);
599 }
600
601 /* Like copy_to_reg but always give the new register mode MODE
602 in case X is a constant. */
603
604 rtx
605 copy_to_mode_reg (machine_mode mode, rtx x)
606 {
607 rtx temp = gen_reg_rtx (mode);
608
609 /* If not an operand, must be an address with PLUS and MULT so
610 do the computation. */
611 if (! general_operand (x, VOIDmode))
612 x = force_operand (x, temp);
613
614 gcc_assert (GET_MODE (x) == mode || GET_MODE (x) == VOIDmode);
615 if (x != temp)
616 emit_move_insn (temp, x);
617 return temp;
618 }
619
620 /* Load X into a register if it is not already one.
621 Use mode MODE for the register.
622 X should be valid for mode MODE, but it may be a constant which
623 is valid for all integer modes; that's why caller must specify MODE.
624
625 The caller must not alter the value in the register we return,
626 since we mark it as a "constant" register. */
627
628 rtx
629 force_reg (machine_mode mode, rtx x)
630 {
631 rtx temp, set;
632 rtx_insn *insn;
633
634 if (REG_P (x))
635 return x;
636
637 if (general_operand (x, mode))
638 {
639 temp = gen_reg_rtx (mode);
640 insn = emit_move_insn (temp, x);
641 }
642 else
643 {
644 temp = force_operand (x, NULL_RTX);
645 if (REG_P (temp))
646 insn = get_last_insn ();
647 else
648 {
649 rtx temp2 = gen_reg_rtx (mode);
650 insn = emit_move_insn (temp2, temp);
651 temp = temp2;
652 }
653 }
654
655 /* Let optimizers know that TEMP's value never changes
656 and that X can be substituted for it. Don't get confused
657 if INSN set something else (such as a SUBREG of TEMP). */
658 if (CONSTANT_P (x)
659 && (set = single_set (insn)) != 0
660 && SET_DEST (set) == temp
661 && ! rtx_equal_p (x, SET_SRC (set)))
662 set_unique_reg_note (insn, REG_EQUAL, x);
663
664 /* Let optimizers know that TEMP is a pointer, and if so, the
665 known alignment of that pointer. */
666 {
667 unsigned align = 0;
668 if (GET_CODE (x) == SYMBOL_REF)
669 {
670 align = BITS_PER_UNIT;
671 if (SYMBOL_REF_DECL (x) && DECL_P (SYMBOL_REF_DECL (x)))
672 align = DECL_ALIGN (SYMBOL_REF_DECL (x));
673 }
674 else if (GET_CODE (x) == LABEL_REF)
675 align = BITS_PER_UNIT;
676 else if (GET_CODE (x) == CONST
677 && GET_CODE (XEXP (x, 0)) == PLUS
678 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
679 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
680 {
681 rtx s = XEXP (XEXP (x, 0), 0);
682 rtx c = XEXP (XEXP (x, 0), 1);
683 unsigned sa, ca;
684
685 sa = BITS_PER_UNIT;
686 if (SYMBOL_REF_DECL (s) && DECL_P (SYMBOL_REF_DECL (s)))
687 sa = DECL_ALIGN (SYMBOL_REF_DECL (s));
688
689 if (INTVAL (c) == 0)
690 align = sa;
691 else
692 {
693 ca = ctz_hwi (INTVAL (c)) * BITS_PER_UNIT;
694 align = MIN (sa, ca);
695 }
696 }
697
698 if (align || (MEM_P (x) && MEM_POINTER (x)))
699 mark_reg_pointer (temp, align);
700 }
701
702 return temp;
703 }
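/* Illustrative sketch, not part of the original source.  Forcing a
   SYMBOL_REF whose decl has DECL_ALIGN of 64 bits into a register
   emits a move into a fresh pseudo and marks that pseudo as a pointer
   with 64-bit known alignment; the REG_EQUAL note is only attached
   when the emitted move's source no longer matches X (for instance
   after the constant had to be legitimized).  */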
704
705 /* If X is a memory ref, copy its contents to a new temp reg and return
706 that reg. Otherwise, return X. */
707
708 rtx
709 force_not_mem (rtx x)
710 {
711 rtx temp;
712
713 if (!MEM_P (x) || GET_MODE (x) == BLKmode)
714 return x;
715
716 temp = gen_reg_rtx (GET_MODE (x));
717
718 if (MEM_POINTER (x))
719 REG_POINTER (temp) = 1;
720
721 emit_move_insn (temp, x);
722 return temp;
723 }
724
725 /* Copy X to TARGET (if it's nonzero and a reg)
726 or to a new temp reg and return that reg.
727 MODE is the mode to use for X in case it is a constant. */
728
729 rtx
730 copy_to_suggested_reg (rtx x, rtx target, machine_mode mode)
731 {
732 rtx temp;
733
734 if (target && REG_P (target))
735 temp = target;
736 else
737 temp = gen_reg_rtx (mode);
738
739 emit_move_insn (temp, x);
740 return temp;
741 }
742 \f
743 /* Return the mode to use to pass or return a scalar of TYPE and MODE.
744 PUNSIGNEDP points to the signedness of the type and may be adjusted
745 to show what signedness to use on extension operations.
746
747 FOR_RETURN is nonzero if the caller is promoting the return value
748 of FNDECL, else it is for promoting args. */
749
750 machine_mode
751 promote_function_mode (const_tree type, machine_mode mode, int *punsignedp,
752 const_tree funtype, int for_return)
753 {
754 /* Called without a type node for a libcall. */
755 if (type == NULL_TREE)
756 {
757 if (INTEGRAL_MODE_P (mode))
758 return targetm.calls.promote_function_mode (NULL_TREE, mode,
759 punsignedp, funtype,
760 for_return);
761 else
762 return mode;
763 }
764
765 switch (TREE_CODE (type))
766 {
767 case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
768 case REAL_TYPE: case OFFSET_TYPE: case FIXED_POINT_TYPE:
769 case POINTER_TYPE: case REFERENCE_TYPE:
770 return targetm.calls.promote_function_mode (type, mode, punsignedp, funtype,
771 for_return);
772
773 default:
774 return mode;
775 }
776 }
777 /* Return the mode to use to store a scalar of TYPE and MODE.
778 PUNSIGNEDP points to the signedness of the type and may be adjusted
779 to show what signedness to use on extension operations. */
780
781 machine_mode
782 promote_mode (const_tree type ATTRIBUTE_UNUSED, machine_mode mode,
783 int *punsignedp ATTRIBUTE_UNUSED)
784 {
785 #ifdef PROMOTE_MODE
786 enum tree_code code;
787 int unsignedp;
788 #endif
789
790 /* For libcalls this is invoked without TYPE from the backend's
791 TARGET_PROMOTE_FUNCTION_MODE hooks. Don't do anything in that
792 case. */
793 if (type == NULL_TREE)
794 return mode;
795
796 /* FIXME: this is the same logic that was there until GCC 4.4, but we
797 probably want to test POINTERS_EXTEND_UNSIGNED even if PROMOTE_MODE
798 is not defined. The affected targets are M32C, S390, SPARC. */
799 #ifdef PROMOTE_MODE
800 code = TREE_CODE (type);
801 unsignedp = *punsignedp;
802
803 switch (code)
804 {
805 case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
806 case REAL_TYPE: case OFFSET_TYPE: case FIXED_POINT_TYPE:
807 PROMOTE_MODE (mode, unsignedp, type);
808 *punsignedp = unsignedp;
809 return mode;
810
811 #ifdef POINTERS_EXTEND_UNSIGNED
812 case REFERENCE_TYPE:
813 case POINTER_TYPE:
814 *punsignedp = POINTERS_EXTEND_UNSIGNED;
815 return targetm.addr_space.address_mode
816 (TYPE_ADDR_SPACE (TREE_TYPE (type)));
817 #endif
818
819 default:
820 return mode;
821 }
822 #else
823 return mode;
824 #endif
825 }
826
827
828 /* Use one of promote_mode or promote_function_mode to find the promoted
829 mode of DECL. If PUNSIGNEDP is not NULL, store there the unsignedness
830 of DECL after promotion. */
831
832 machine_mode
833 promote_decl_mode (const_tree decl, int *punsignedp)
834 {
835 tree type = TREE_TYPE (decl);
836 int unsignedp = TYPE_UNSIGNED (type);
837 machine_mode mode = DECL_MODE (decl);
838 machine_mode pmode;
839
840 if (TREE_CODE (decl) == RESULT_DECL && !DECL_BY_REFERENCE (decl))
841 pmode = promote_function_mode (type, mode, &unsignedp,
842 TREE_TYPE (current_function_decl), 1);
843 else if (TREE_CODE (decl) == RESULT_DECL || TREE_CODE (decl) == PARM_DECL)
844 pmode = promote_function_mode (type, mode, &unsignedp,
845 TREE_TYPE (current_function_decl), 2);
846 else
847 pmode = promote_mode (type, mode, &unsignedp);
848
849 if (punsignedp)
850 *punsignedp = unsignedp;
851 return pmode;
852 }
853
854 /* Return the promoted mode for name. If it is a named SSA_NAME, it
855 is the same as promote_decl_mode. Otherwise, it is the promoted
856 mode of a temp decl of same type as the SSA_NAME, if we had created
857 one. */
858
859 machine_mode
860 promote_ssa_mode (const_tree name, int *punsignedp)
861 {
862 gcc_assert (TREE_CODE (name) == SSA_NAME);
863
864 /* Partitions holding parms and results must be promoted as expected
865 by function.c. */
866 if (SSA_NAME_VAR (name)
867 && (TREE_CODE (SSA_NAME_VAR (name)) == PARM_DECL
868 || TREE_CODE (SSA_NAME_VAR (name)) == RESULT_DECL))
869 {
870 machine_mode mode = promote_decl_mode (SSA_NAME_VAR (name), punsignedp);
871 if (mode != BLKmode)
872 return mode;
873 }
874
875 tree type = TREE_TYPE (name);
876 int unsignedp = TYPE_UNSIGNED (type);
877 machine_mode mode = TYPE_MODE (type);
878
879 /* Bypass TYPE_MODE when it maps vector modes to BLKmode. */
880 if (mode == BLKmode)
881 {
882 gcc_assert (VECTOR_TYPE_P (type));
883 mode = type->type_common.mode;
884 }
885
886 machine_mode pmode = promote_mode (type, mode, &unsignedp);
887 if (punsignedp)
888 *punsignedp = unsignedp;
889
890 return pmode;
891 }
892
893
894 \f
895 /* Controls the behavior of {anti_,}adjust_stack. */
896 static bool suppress_reg_args_size;
897
898 /* A helper for adjust_stack and anti_adjust_stack. */
899
900 static void
901 adjust_stack_1 (rtx adjust, bool anti_p)
902 {
903 rtx temp;
904 rtx_insn *insn;
905
906 /* Hereafter anti_p means subtract_p. */
907 if (!STACK_GROWS_DOWNWARD)
908 anti_p = !anti_p;
909
910 temp = expand_binop (Pmode,
911 anti_p ? sub_optab : add_optab,
912 stack_pointer_rtx, adjust, stack_pointer_rtx, 0,
913 OPTAB_LIB_WIDEN);
914
915 if (temp != stack_pointer_rtx)
916 insn = emit_move_insn (stack_pointer_rtx, temp);
917 else
918 {
919 insn = get_last_insn ();
920 temp = single_set (insn);
921 gcc_assert (temp != NULL && SET_DEST (temp) == stack_pointer_rtx);
922 }
923
924 if (!suppress_reg_args_size)
925 add_reg_note (insn, REG_ARGS_SIZE, GEN_INT (stack_pointer_delta));
926 }
927
928 /* Adjust the stack pointer by ADJUST (an rtx for a number of bytes).
929 This pops when ADJUST is positive. ADJUST need not be constant. */
930
931 void
932 adjust_stack (rtx adjust)
933 {
934 if (adjust == const0_rtx)
935 return;
936
937 /* We expect all variable-sized adjustments to be a multiple of
938 PREFERRED_STACK_BOUNDARY. */
939 if (CONST_INT_P (adjust))
940 stack_pointer_delta -= INTVAL (adjust);
941
942 adjust_stack_1 (adjust, false);
943 }
944
945 /* Adjust the stack pointer by minus ADJUST (an rtx for a number of bytes).
946 This pushes when ADJUST is positive. ADJUST need not be constant. */
947
948 void
949 anti_adjust_stack (rtx adjust)
950 {
951 if (adjust == const0_rtx)
952 return;
953
954 /* We expect all variable-sized adjustments to be a multiple of
955 PREFERRED_STACK_BOUNDARY. */
956 if (CONST_INT_P (adjust))
957 stack_pointer_delta += INTVAL (adjust);
958
959 adjust_stack_1 (adjust, true);
960 }
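/* Illustrative sketch, not part of the original source.  On a
   STACK_GROWS_DOWNWARD target, anti_adjust_stack (GEN_INT (32)) emits
   SP = SP - 32 and increases stack_pointer_delta by 32, while
   adjust_stack (GEN_INT (32)) emits SP = SP + 32 and decreases it;
   both attach a REG_ARGS_SIZE note to the adjustment insn unless
   suppress_reg_args_size is set.  */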
961
962 /* Round the size of a block to be pushed up to the boundary required
963 by this machine. SIZE is the desired size, which need not be constant. */
964
965 static rtx
966 round_push (rtx size)
967 {
968 rtx align_rtx, alignm1_rtx;
969
970 if (!SUPPORTS_STACK_ALIGNMENT
971 || crtl->preferred_stack_boundary == MAX_SUPPORTED_STACK_ALIGNMENT)
972 {
973 int align = crtl->preferred_stack_boundary / BITS_PER_UNIT;
974
975 if (align == 1)
976 return size;
977
978 if (CONST_INT_P (size))
979 {
980 HOST_WIDE_INT new_size = (INTVAL (size) + align - 1) / align * align;
981
982 if (INTVAL (size) != new_size)
983 size = GEN_INT (new_size);
984 return size;
985 }
986
987 align_rtx = GEN_INT (align);
988 alignm1_rtx = GEN_INT (align - 1);
989 }
990 else
991 {
992 /* If crtl->preferred_stack_boundary might still grow, use
993 virtual_preferred_stack_boundary_rtx instead. This will be
994 substituted by the right value in vregs pass and optimized
995 during combine. */
996 align_rtx = virtual_preferred_stack_boundary_rtx;
997 alignm1_rtx = force_operand (plus_constant (Pmode, align_rtx, -1),
998 NULL_RTX);
999 }
1000
1001 /* CEIL_DIV_EXPR needs to worry about the addition overflowing,
1002 but we know it can't. So add ourselves and then do
1003 TRUNC_DIV_EXPR. */
1004 size = expand_binop (Pmode, add_optab, size, alignm1_rtx,
1005 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1006 size = expand_divmod (0, TRUNC_DIV_EXPR, Pmode, size, align_rtx,
1007 NULL_RTX, 1);
1008 size = expand_mult (Pmode, size, align_rtx, NULL_RTX, 1);
1009
1010 return size;
1011 }
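/* Illustrative sketch, not part of the original source.  With a
   preferred stack boundary of 128 bits (16 bytes), a constant SIZE of
   37 is rounded to 48 at compile time; for a variable SIZE the emitted
   sequence computes ((SIZE + 15) / 16) * 16 in Pmode.  */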
1012 \f
1013 /* Save the stack pointer for the purpose in SAVE_LEVEL. PSAVE is a pointer
1014 to a previously-created save area. If no save area has been allocated,
1015 this function will allocate one. If a save area is specified, it
1016 must be of the proper mode. */
1017
1018 void
1019 emit_stack_save (enum save_level save_level, rtx *psave)
1020 {
1021 rtx sa = *psave;
1022 /* The default is that we use a move insn and save in a Pmode object. */
1023 rtx_insn *(*fcn) (rtx, rtx) = gen_move_insn;
1024 machine_mode mode = STACK_SAVEAREA_MODE (save_level);
1025
1026 /* See if this machine has anything special to do for this kind of save. */
1027 switch (save_level)
1028 {
1029 case SAVE_BLOCK:
1030 if (targetm.have_save_stack_block ())
1031 fcn = targetm.gen_save_stack_block;
1032 break;
1033 case SAVE_FUNCTION:
1034 if (targetm.have_save_stack_function ())
1035 fcn = targetm.gen_save_stack_function;
1036 break;
1037 case SAVE_NONLOCAL:
1038 if (targetm.have_save_stack_nonlocal ())
1039 fcn = targetm.gen_save_stack_nonlocal;
1040 break;
1041 default:
1042 break;
1043 }
1044
1045 /* If there is no save area and we have to allocate one, do so. Otherwise
1046 verify the save area is the proper mode. */
1047
1048 if (sa == 0)
1049 {
1050 if (mode != VOIDmode)
1051 {
1052 if (save_level == SAVE_NONLOCAL)
1053 *psave = sa = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
1054 else
1055 *psave = sa = gen_reg_rtx (mode);
1056 }
1057 }
1058
1059 do_pending_stack_adjust ();
1060 if (sa != 0)
1061 sa = validize_mem (sa);
1062 emit_insn (fcn (sa, stack_pointer_rtx));
1063 }
1064
1065 /* Restore the stack pointer for the purpose in SAVE_LEVEL. SA is the save
1066 area made by emit_stack_save. If it is zero, we have nothing to do. */
1067
1068 void
1069 emit_stack_restore (enum save_level save_level, rtx sa)
1070 {
1071 /* The default is that we use a move insn. */
1072 rtx_insn *(*fcn) (rtx, rtx) = gen_move_insn;
1073
1074 /* If stack_realign_drap, the x86 backend emits a prologue that aligns both
1075 STACK_POINTER and HARD_FRAME_POINTER.
1076 If stack_realign_fp, the x86 backend emits a prologue that aligns only
1077 STACK_POINTER. This renders the HARD_FRAME_POINTER unusable for accessing
1078 aligned variables, which is reflected in ix86_can_eliminate.
1079 We normally still have the realigned STACK_POINTER that we can use.
1080 But if there is a stack restore still present at reload, it can trigger
1081 mark_not_eliminable for the STACK_POINTER, leaving no way to eliminate
1082 FRAME_POINTER into a hard reg.
1083 To prevent this situation, we force need_drap if we emit a stack
1084 restore. */
1085 if (SUPPORTS_STACK_ALIGNMENT)
1086 crtl->need_drap = true;
1087
1088 /* See if this machine has anything special to do for this kind of save. */
1089 switch (save_level)
1090 {
1091 case SAVE_BLOCK:
1092 if (targetm.have_restore_stack_block ())
1093 fcn = targetm.gen_restore_stack_block;
1094 break;
1095 case SAVE_FUNCTION:
1096 if (targetm.have_restore_stack_function ())
1097 fcn = targetm.gen_restore_stack_function;
1098 break;
1099 case SAVE_NONLOCAL:
1100 if (targetm.have_restore_stack_nonlocal ())
1101 fcn = targetm.gen_restore_stack_nonlocal;
1102 break;
1103 default:
1104 break;
1105 }
1106
1107 if (sa != 0)
1108 {
1109 sa = validize_mem (sa);
1110 /* These clobbers prevent the scheduler from moving
1111 references to variable arrays below the code
1112 that deletes (pops) the arrays. */
1113 emit_clobber (gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (VOIDmode)));
1114 emit_clobber (gen_rtx_MEM (BLKmode, stack_pointer_rtx));
1115 }
1116
1117 discard_pending_stack_adjust ();
1118
1119 emit_insn (fcn (stack_pointer_rtx, sa));
1120 }
1121
1122 /* Invoke emit_stack_save on the nonlocal_goto_save_area for the current
1123 function. This should be called whenever we allocate or deallocate
1124 dynamic stack space. */
1125
1126 void
1127 update_nonlocal_goto_save_area (void)
1128 {
1129 tree t_save;
1130 rtx r_save;
1131
1132 /* The nonlocal_goto_save_area object is an array of N pointers. The
1133 first one is used for the frame pointer save; the rest are sized by
1134 STACK_SAVEAREA_MODE. Create a reference to array index 1, the first
1135 of the stack save area slots. */
1136 t_save = build4 (ARRAY_REF,
1137 TREE_TYPE (TREE_TYPE (cfun->nonlocal_goto_save_area)),
1138 cfun->nonlocal_goto_save_area,
1139 integer_one_node, NULL_TREE, NULL_TREE);
1140 r_save = expand_expr (t_save, NULL_RTX, VOIDmode, EXPAND_WRITE);
1141
1142 emit_stack_save (SAVE_NONLOCAL, &r_save);
1143 }
1144
1145 /* Record a new stack level for the current function. This should be called
1146 whenever we allocate or deallocate dynamic stack space. */
1147
1148 void
1149 record_new_stack_level (void)
1150 {
1151 /* Record the new stack level for nonlocal gotos. */
1152 if (cfun->nonlocal_goto_save_area)
1153 update_nonlocal_goto_save_area ();
1154
1155 /* Record the new stack level for SJLJ exceptions. */
1156 if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ)
1157 update_sjlj_context ();
1158 }
1159 \f
1160 /* Return an rtx doing runtime alignment to REQUIRED_ALIGN on TARGET. */
1161 static rtx
1162 align_dynamic_address (rtx target, unsigned required_align)
1163 {
1164 /* CEIL_DIV_EXPR needs to worry about the addition overflowing,
1165 but we know it can't. So add ourselves and then do
1166 TRUNC_DIV_EXPR. */
1167 target = expand_binop (Pmode, add_optab, target,
1168 gen_int_mode (required_align / BITS_PER_UNIT - 1,
1169 Pmode),
1170 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1171 target = expand_divmod (0, TRUNC_DIV_EXPR, Pmode, target,
1172 gen_int_mode (required_align / BITS_PER_UNIT,
1173 Pmode),
1174 NULL_RTX, 1);
1175 target = expand_mult (Pmode, target,
1176 gen_int_mode (required_align / BITS_PER_UNIT,
1177 Pmode),
1178 NULL_RTX, 1);
1179
1180 return target;
1181 }
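/* Illustrative sketch, not part of the original source.  With
   REQUIRED_ALIGN of 64 bits, the emitted sequence computes
   ((TARGET + 7) / 8) * 8, i.e. it rounds the address up to the next
   8-byte boundary, which for a power-of-two alignment amounts to
   clearing the low bits.  */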
1182
1183 /* Return an rtx through *PSIZE, representing the size of an area of memory to
1184 be dynamically pushed on the stack.
1185
1186 *PSIZE is an rtx representing the size of the area.
1187
1188 SIZE_ALIGN is the alignment (in bits) that we know SIZE has. This
1189 parameter may be zero. If so, a proper value will be extracted
1190 from SIZE if it is constant, otherwise BITS_PER_UNIT will be assumed.
1191
1192 REQUIRED_ALIGN is the alignment (in bits) required for the region
1193 of memory.
1194
1195 If PSTACK_USAGE_SIZE is not NULL it points to a value that is increased for
1196 the additional size returned. */
1197 void
1198 get_dynamic_stack_size (rtx *psize, unsigned size_align,
1199 unsigned required_align,
1200 HOST_WIDE_INT *pstack_usage_size)
1201 {
1202 unsigned extra = 0;
1203 rtx size = *psize;
1204
1205 /* Ensure the size is in the proper mode. */
1206 if (GET_MODE (size) != VOIDmode && GET_MODE (size) != Pmode)
1207 size = convert_to_mode (Pmode, size, 1);
1208
1209 if (CONST_INT_P (size))
1210 {
1211 unsigned HOST_WIDE_INT lsb;
1212
1213 lsb = INTVAL (size);
1214 lsb &= -lsb;
1215
1216 /* Watch out for overflow truncating to "unsigned". */
1217 if (lsb > UINT_MAX / BITS_PER_UNIT)
1218 size_align = 1u << (HOST_BITS_PER_INT - 1);
1219 else
1220 size_align = (unsigned)lsb * BITS_PER_UNIT;
1221 }
1222 else if (size_align < BITS_PER_UNIT)
1223 size_align = BITS_PER_UNIT;
1224
1225 /* We can't attempt to minimize the necessary alignment, because we don't
1226 know the final value of preferred_stack_boundary yet while executing
1227 this code. */
1228 if (crtl->preferred_stack_boundary < PREFERRED_STACK_BOUNDARY)
1229 crtl->preferred_stack_boundary = PREFERRED_STACK_BOUNDARY;
1230
1231 /* We will need to ensure that the address we return is aligned to
1232 REQUIRED_ALIGN. At this point in the compilation, we don't always
1233 know the final value of the STACK_DYNAMIC_OFFSET used in function.c
1234 (it might depend on the size of the outgoing parameter lists, for
1235 example), so we must preventively align the value. We leave space
1236 in SIZE for the hole that might result from the alignment operation. */
1237
1238 extra = (required_align - BITS_PER_UNIT) / BITS_PER_UNIT;
1239 size = plus_constant (Pmode, size, extra);
1240 size = force_operand (size, NULL_RTX);
1241
1242 if (flag_stack_usage_info && pstack_usage_size)
1243 *pstack_usage_size += extra;
1244
1245 if (extra && size_align > BITS_PER_UNIT)
1246 size_align = BITS_PER_UNIT;
1247
1248 /* Round the size to a multiple of the required stack alignment.
1249 Since the stack is presumed to be rounded before this allocation,
1250 this will maintain the required alignment.
1251
1252 If the stack grows downward, we could save an insn by subtracting
1253 SIZE from the stack pointer and then aligning the stack pointer.
1254 The problem with this is that the stack pointer may be unaligned
1255 between the execution of the subtraction and alignment insns and
1256 some machines do not allow this. Even on those that do, some
1257 signal handlers malfunction if a signal should occur between those
1258 insns. Since this is an extremely rare event, we have no reliable
1259 way of knowing which systems have this problem. So we avoid even
1260 momentarily mis-aligning the stack. */
1261 if (size_align % MAX_SUPPORTED_STACK_ALIGNMENT != 0)
1262 {
1263 size = round_push (size);
1264
1265 if (flag_stack_usage_info && pstack_usage_size)
1266 {
1267 int align = crtl->preferred_stack_boundary / BITS_PER_UNIT;
1268 *pstack_usage_size =
1269 (*pstack_usage_size + align - 1) / align * align;
1270 }
1271 }
1272
1273 *psize = size;
1274 }
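/* Illustrative sketch, not part of the original source.  For
   REQUIRED_ALIGN of 128 bits, EXTRA is (128 - 8) / 8 = 15 bytes, so
   SIZE becomes SIZE + 15 to leave room for the later alignment of the
   returned address; if the known alignment of SIZE is not already a
   multiple of MAX_SUPPORTED_STACK_ALIGNMENT, round_push then rounds
   the total up to the preferred stack boundary.  */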
1275
1276 /* Return an rtx representing the address of an area of memory dynamically
1277 pushed on the stack.
1278
1279 Any required stack pointer alignment is preserved.
1280
1281 SIZE is an rtx representing the size of the area.
1282
1283 SIZE_ALIGN is the alignment (in bits) that we know SIZE has. This
1284 parameter may be zero. If so, a proper value will be extracted
1285 from SIZE if it is constant, otherwise BITS_PER_UNIT will be assumed.
1286
1287 REQUIRED_ALIGN is the alignment (in bits) required for the region
1288 of memory.
1289
1290 If CANNOT_ACCUMULATE is set to TRUE, the caller guarantees that the
1291 stack space allocated by the generated code cannot be added with itself
1292 in the course of the execution of the function. It is always safe to
1293 pass FALSE here and the following criterion is sufficient in order to
1294 pass TRUE: every path in the CFG that starts at the allocation point and
1295 loops to it executes the associated deallocation code. */
1296
1297 rtx
1298 allocate_dynamic_stack_space (rtx size, unsigned size_align,
1299 unsigned required_align, bool cannot_accumulate)
1300 {
1301 HOST_WIDE_INT stack_usage_size = -1;
1302 rtx_code_label *final_label;
1303 rtx final_target, target;
1304
1305 /* If we're asking for zero bytes, it doesn't matter what we point
1306 to since we can't dereference it. But return a reasonable
1307 address anyway. */
1308 if (size == const0_rtx)
1309 return virtual_stack_dynamic_rtx;
1310
1311 /* Otherwise, show we're calling alloca or equivalent. */
1312 cfun->calls_alloca = 1;
1313
1314 /* If stack usage info is requested, look into the size we are passed.
1315 We need to do so this early to avoid the obfuscation that may be
1316 introduced later by the various alignment operations. */
1317 if (flag_stack_usage_info)
1318 {
1319 if (CONST_INT_P (size))
1320 stack_usage_size = INTVAL (size);
1321 else if (REG_P (size))
1322 {
1323 /* Look into the last emitted insn and see if we can deduce
1324 something for the register. */
1325 rtx_insn *insn;
1326 rtx set, note;
1327 insn = get_last_insn ();
1328 if ((set = single_set (insn)) && rtx_equal_p (SET_DEST (set), size))
1329 {
1330 if (CONST_INT_P (SET_SRC (set)))
1331 stack_usage_size = INTVAL (SET_SRC (set));
1332 else if ((note = find_reg_equal_equiv_note (insn))
1333 && CONST_INT_P (XEXP (note, 0)))
1334 stack_usage_size = INTVAL (XEXP (note, 0));
1335 }
1336 }
1337
1338 /* If the size is not constant, we can't say anything. */
1339 if (stack_usage_size == -1)
1340 {
1341 current_function_has_unbounded_dynamic_stack_size = 1;
1342 stack_usage_size = 0;
1343 }
1344 }
1345
1346 get_dynamic_stack_size (&size, size_align, required_align, &stack_usage_size);
1347
1348 target = gen_reg_rtx (Pmode);
1349
1350 /* The size is supposed to be fully adjusted at this point so record it
1351 if stack usage info is requested. */
1352 if (flag_stack_usage_info)
1353 {
1354 current_function_dynamic_stack_size += stack_usage_size;
1355
1356 /* ??? This is gross but the only safe stance in the absence
1357 of stack usage oriented flow analysis. */
1358 if (!cannot_accumulate)
1359 current_function_has_unbounded_dynamic_stack_size = 1;
1360 }
1361
1362 do_pending_stack_adjust ();
1363
1364 final_label = NULL;
1365 final_target = NULL_RTX;
1366
1367 /* If we are splitting the stack, we need to ask the backend whether
1368 there is enough room on the current stack. If there isn't, or if
1369 the backend doesn't know how to tell us, then we need to call a
1370 function to allocate memory in some other way. This memory will
1371 be released when we release the current stack segment. The
1372 effect is that stack allocation becomes less efficient, but at
1373 least it doesn't cause a stack overflow. */
1374 if (flag_split_stack)
1375 {
1376 rtx_code_label *available_label;
1377 rtx ask, space, func;
1378
1379 available_label = NULL;
1380
1381 if (targetm.have_split_stack_space_check ())
1382 {
1383 available_label = gen_label_rtx ();
1384
1385 /* This instruction will branch to AVAILABLE_LABEL if there
1386 are SIZE bytes available on the stack. */
1387 emit_insn (targetm.gen_split_stack_space_check
1388 (size, available_label));
1389 }
1390
1391 /* The __morestack_allocate_stack_space function will allocate
1392 memory using malloc. If the alignment of the memory returned
1393 by malloc does not meet REQUIRED_ALIGN, we increase SIZE to
1394 make sure we allocate enough space. */
1395 if (MALLOC_ABI_ALIGNMENT >= required_align)
1396 ask = size;
1397 else
1398 ask = expand_binop (Pmode, add_optab, size,
1399 gen_int_mode (required_align / BITS_PER_UNIT - 1,
1400 Pmode),
1401 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1402
1403 func = init_one_libfunc ("__morestack_allocate_stack_space");
1404
1405 space = emit_library_call_value (func, target, LCT_NORMAL, Pmode,
1406 1, ask, Pmode);
1407
1408 if (available_label == NULL_RTX)
1409 return space;
1410
1411 final_target = gen_reg_rtx (Pmode);
1412
1413 emit_move_insn (final_target, space);
1414
1415 final_label = gen_label_rtx ();
1416 emit_jump (final_label);
1417
1418 emit_label (available_label);
1419 }
1420
1421 /* We ought always to be called at the top level, and the stack ought to be
1422 properly aligned. */
1423 gcc_assert (!(stack_pointer_delta
1424 % (PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT)));
1425
1426 /* If needed, check that we have the required amount of stack. Take into
1427 account what has already been checked. */
1428 if (STACK_CHECK_MOVING_SP)
1429 ;
1430 else if (flag_stack_check == GENERIC_STACK_CHECK)
1431 probe_stack_range (STACK_OLD_CHECK_PROTECT + STACK_CHECK_MAX_FRAME_SIZE,
1432 size);
1433 else if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
1434 probe_stack_range (STACK_CHECK_PROTECT, size);
1435
1436 /* Don't let anti_adjust_stack emit notes. */
1437 suppress_reg_args_size = true;
1438
1439 /* Perform the required allocation from the stack. Some systems do
1440 this differently than simply incrementing/decrementing from the
1441 stack pointer, such as acquiring the space by calling malloc(). */
1442 if (targetm.have_allocate_stack ())
1443 {
1444 struct expand_operand ops[2];
1445 /* We don't have to check against the predicate for operand 0 since
1446 TARGET is known to be a pseudo of the proper mode, which must
1447 be valid for the operand. */
1448 create_fixed_operand (&ops[0], target);
1449 create_convert_operand_to (&ops[1], size, STACK_SIZE_MODE, true);
1450 expand_insn (targetm.code_for_allocate_stack, 2, ops);
1451 }
1452 else
1453 {
1454 int saved_stack_pointer_delta;
1455
1456 if (!STACK_GROWS_DOWNWARD)
1457 emit_move_insn (target, virtual_stack_dynamic_rtx);
1458
1459 /* Check stack bounds if necessary. */
1460 if (crtl->limit_stack)
1461 {
1462 rtx available;
1463 rtx_code_label *space_available = gen_label_rtx ();
1464 if (STACK_GROWS_DOWNWARD)
1465 available = expand_binop (Pmode, sub_optab,
1466 stack_pointer_rtx, stack_limit_rtx,
1467 NULL_RTX, 1, OPTAB_WIDEN);
1468 else
1469 available = expand_binop (Pmode, sub_optab,
1470 stack_limit_rtx, stack_pointer_rtx,
1471 NULL_RTX, 1, OPTAB_WIDEN);
1472
1473 emit_cmp_and_jump_insns (available, size, GEU, NULL_RTX, Pmode, 1,
1474 space_available);
1475 if (targetm.have_trap ())
1476 emit_insn (targetm.gen_trap ());
1477 else
1478 error ("stack limits not supported on this target");
1479 emit_barrier ();
1480 emit_label (space_available);
1481 }
1482
1483 saved_stack_pointer_delta = stack_pointer_delta;
1484
1485 if (flag_stack_check && STACK_CHECK_MOVING_SP)
1486 anti_adjust_stack_and_probe (size, false);
1487 else
1488 anti_adjust_stack (size);
1489
1490 /* Even if size is constant, don't modify stack_pointer_delta.
1491 The constant size alloca should preserve
1492 crtl->preferred_stack_boundary alignment. */
1493 stack_pointer_delta = saved_stack_pointer_delta;
1494
1495 if (STACK_GROWS_DOWNWARD)
1496 emit_move_insn (target, virtual_stack_dynamic_rtx);
1497 }
1498
1499 suppress_reg_args_size = false;
1500
1501 /* Finish up the split stack handling. */
1502 if (final_label != NULL_RTX)
1503 {
1504 gcc_assert (flag_split_stack);
1505 emit_move_insn (final_target, target);
1506 emit_label (final_label);
1507 target = final_target;
1508 }
1509
1510 target = align_dynamic_address (target, required_align);
1511
1512 /* Now that we've committed to a return value, mark its alignment. */
1513 mark_reg_pointer (target, required_align);
1514
1515 /* Record the new stack level. */
1516 record_new_stack_level ();
1517
1518 return target;
1519 }
1520
1521 /* Return an rtx representing the address of an area of memory already
1522 statically pushed onto the stack in the virtual stack vars area. (It is
1523 assumed that the area is allocated in the function prologue.)
1524
1525 Any required stack pointer alignment is preserved.
1526
1527 OFFSET is the offset of the area into the virtual stack vars area.
1528
1529 REQUIRED_ALIGN is the alignment (in bits) required for the region
1530 of memory. */
1531
1532 rtx
1533 get_dynamic_stack_base (HOST_WIDE_INT offset, unsigned required_align)
1534 {
1535 rtx target;
1536
1537 if (crtl->preferred_stack_boundary < PREFERRED_STACK_BOUNDARY)
1538 crtl->preferred_stack_boundary = PREFERRED_STACK_BOUNDARY;
1539
1540 target = gen_reg_rtx (Pmode);
1541 emit_move_insn (target, virtual_stack_vars_rtx);
1542 target = expand_binop (Pmode, add_optab, target,
1543 gen_int_mode (offset, Pmode),
1544 NULL_RTX, 1, OPTAB_LIB_WIDEN);
1545 target = align_dynamic_address (target, required_align);
1546
1547 /* Now that we've committed to a return value, mark its alignment. */
1548 mark_reg_pointer (target, required_align);
1549
1550 return target;
1551 }
1552 \f
1553 /* A front end may want to override GCC's stack checking by providing a
1554 run-time routine to call to check the stack, so provide a mechanism for
1555 calling that routine. */
1556
1557 static GTY(()) rtx stack_check_libfunc;
1558
1559 void
1560 set_stack_check_libfunc (const char *libfunc_name)
1561 {
1562 gcc_assert (stack_check_libfunc == NULL_RTX);
1563 stack_check_libfunc = gen_rtx_SYMBOL_REF (Pmode, libfunc_name);
1564 }
1565 \f
1566 /* Emit one stack probe at ADDRESS, an address within the stack. */
1567
1568 void
1569 emit_stack_probe (rtx address)
1570 {
1571 if (targetm.have_probe_stack_address ())
1572 emit_insn (targetm.gen_probe_stack_address (address));
1573 else
1574 {
1575 rtx memref = gen_rtx_MEM (word_mode, address);
1576
1577 MEM_VOLATILE_P (memref) = 1;
1578
1579 /* See if we have an insn to probe the stack. */
1580 if (targetm.have_probe_stack ())
1581 emit_insn (targetm.gen_probe_stack (memref));
1582 else
1583 emit_move_insn (memref, const0_rtx);
1584 }
1585 }
1586
1587 /* Probe a range of stack addresses from FIRST to FIRST+SIZE, inclusive.
1588 FIRST is a constant and size is a Pmode RTX. These are offsets from
1589 the current stack pointer. STACK_GROWS_DOWNWARD says whether to add
1590 or subtract them from the stack pointer. */
1591
1592 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
1593
1594 #if STACK_GROWS_DOWNWARD
1595 #define STACK_GROW_OP MINUS
1596 #define STACK_GROW_OPTAB sub_optab
1597 #define STACK_GROW_OFF(off) -(off)
1598 #else
1599 #define STACK_GROW_OP PLUS
1600 #define STACK_GROW_OPTAB add_optab
1601 #define STACK_GROW_OFF(off) (off)
1602 #endif
1603
1604 void
1605 probe_stack_range (HOST_WIDE_INT first, rtx size)
1606 {
1607 /* First ensure SIZE is Pmode. */
1608 if (GET_MODE (size) != VOIDmode && GET_MODE (size) != Pmode)
1609 size = convert_to_mode (Pmode, size, 1);
1610
1611 /* Next see if we have a function to check the stack. */
1612 if (stack_check_libfunc)
1613 {
1614 rtx addr = memory_address (Pmode,
1615 gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
1616 stack_pointer_rtx,
1617 plus_constant (Pmode,
1618 size, first)));
1619 emit_library_call (stack_check_libfunc, LCT_THROW, VOIDmode, 1, addr,
1620 Pmode);
1621 }
1622
1623 /* Next see if we have an insn to check the stack. */
1624 else if (targetm.have_check_stack ())
1625 {
1626 struct expand_operand ops[1];
1627 rtx addr = memory_address (Pmode,
1628 gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
1629 stack_pointer_rtx,
1630 plus_constant (Pmode,
1631 size, first)));
1632 bool success;
1633 create_input_operand (&ops[0], addr, Pmode);
1634 success = maybe_expand_insn (targetm.code_for_check_stack, 1, ops);
1635 gcc_assert (success);
1636 }
1637
1638 /* Otherwise we have to generate explicit probes. If we have a constant
1639 small number of them to generate, that's the easy case. */
1640 else if (CONST_INT_P (size) && INTVAL (size) < 7 * PROBE_INTERVAL)
1641 {
1642 HOST_WIDE_INT isize = INTVAL (size), i;
1643 rtx addr;
1644
1645 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
1646 it exceeds SIZE. If only one probe is needed, this will not
1647 generate any code. Then probe at FIRST + SIZE. */
1648 for (i = PROBE_INTERVAL; i < isize; i += PROBE_INTERVAL)
1649 {
1650 addr = memory_address (Pmode,
1651 plus_constant (Pmode, stack_pointer_rtx,
1652 STACK_GROW_OFF (first + i)));
1653 emit_stack_probe (addr);
1654 }
1655
1656 addr = memory_address (Pmode,
1657 plus_constant (Pmode, stack_pointer_rtx,
1658 STACK_GROW_OFF (first + isize)));
1659 emit_stack_probe (addr);
1660 }
1661
1662 /* In the variable case, do the same as above, but in a loop. Note that we
1663 must be extra careful with variables wrapping around because we might be
1664 at the very top (or the very bottom) of the address space and we have to
1665 be able to handle this case properly; in particular, we use an equality
1666 test for the loop condition. */
1667 else
1668 {
1669 rtx rounded_size, rounded_size_op, test_addr, last_addr, temp;
1670 rtx_code_label *loop_lab = gen_label_rtx ();
1671 rtx_code_label *end_lab = gen_label_rtx ();
1672
1673 /* Step 1: round SIZE to the previous multiple of the interval. */
1674
1675 /* ROUNDED_SIZE = SIZE & -PROBE_INTERVAL */
1676 rounded_size
1677 = simplify_gen_binary (AND, Pmode, size,
1678 gen_int_mode (-PROBE_INTERVAL, Pmode));
1679 rounded_size_op = force_operand (rounded_size, NULL_RTX);
1680
1681
1682 /* Step 2: compute initial and final value of the loop counter. */
1683
1684 /* TEST_ADDR = SP + FIRST. */
1685 test_addr = force_operand (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
1686 stack_pointer_rtx,
1687 gen_int_mode (first, Pmode)),
1688 NULL_RTX);
1689
1690 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
1691 last_addr = force_operand (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
1692 test_addr,
1693 rounded_size_op), NULL_RTX);
1694
1695
1696 /* Step 3: the loop
1697
1698 while (TEST_ADDR != LAST_ADDR)
1699 {
1700 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
1701 probe at TEST_ADDR
1702 }
1703
1704 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
1705 until it is equal to ROUNDED_SIZE. */
1706
1707 emit_label (loop_lab);
1708
1709 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
1710 emit_cmp_and_jump_insns (test_addr, last_addr, EQ, NULL_RTX, Pmode, 1,
1711 end_lab);
1712
1713 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
1714 temp = expand_binop (Pmode, STACK_GROW_OPTAB, test_addr,
1715 gen_int_mode (PROBE_INTERVAL, Pmode), test_addr,
1716 1, OPTAB_WIDEN);
1717
1718 gcc_assert (temp == test_addr);
1719
1720 /* Probe at TEST_ADDR. */
1721 emit_stack_probe (test_addr);
1722
1723 emit_jump (loop_lab);
1724
1725 emit_label (end_lab);
1726
1727
1728 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
1729 that SIZE is equal to ROUNDED_SIZE. */
1730
1731 /* TEMP = SIZE - ROUNDED_SIZE. */
1732 temp = simplify_gen_binary (MINUS, Pmode, size, rounded_size);
1733 if (temp != const0_rtx)
1734 {
1735 rtx addr;
1736
1737 if (CONST_INT_P (temp))
1738 {
1739 /* Use [base + disp] addressing mode if supported. */
1740 HOST_WIDE_INT offset = INTVAL (temp);
1741 addr = memory_address (Pmode,
1742 plus_constant (Pmode, last_addr,
1743 STACK_GROW_OFF (offset)));
1744 }
1745 else
1746 {
1747 /* Manual CSE if the difference is not known at compile-time. */
1748 temp = gen_rtx_MINUS (Pmode, size, rounded_size_op);
1749 addr = memory_address (Pmode,
1750 gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
1751 last_addr, temp));
1752 }
1753
1754 emit_stack_probe (addr);
1755 }
1756 }
1757
1758 /* Make sure nothing is scheduled before we are done. */
1759 emit_insn (gen_blockage ());
1760 }
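/* Illustrative sketch, not part of the original source.  Assuming
   STACK_CHECK_PROBE_INTERVAL_EXP is 12 (PROBE_INTERVAL = 4096), FIRST
   is 0 and SIZE is a constant 16384, the constant case above emits
   four probes, at stack offsets 4096, 8192, 12288 and 16384 (negated
   on a downward-growing stack).  A variable SIZE instead goes through
   the TEST_ADDR/LAST_ADDR loop, whose equality test keeps wrap-around
   at the ends of the address space well defined.  */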
1761
1762 /* Adjust the stack pointer by minus SIZE (an rtx for a number of bytes)
1763 while probing it. This pushes when SIZE is positive. SIZE need not
1764 be constant. If ADJUST_BACK is true, adjust back the stack pointer
1765 by plus SIZE at the end. */
1766
1767 void
1768 anti_adjust_stack_and_probe (rtx size, bool adjust_back)
1769 {
1770 /* We skip the probe for the first interval + a small dope of 4 words and
1771 probe that many bytes past the specified size to maintain a protection
1772 area at the bottom of the stack. */
1773 const int dope = 4 * UNITS_PER_WORD;
1774
1775 /* First ensure SIZE is Pmode. */
1776 if (GET_MODE (size) != VOIDmode && GET_MODE (size) != Pmode)
1777 size = convert_to_mode (Pmode, size, 1);
1778
1779 /* If we have a constant small number of probes to generate, that's the
1780 easy case. */
1781 if (CONST_INT_P (size) && INTVAL (size) < 7 * PROBE_INTERVAL)
1782 {
1783 HOST_WIDE_INT isize = INTVAL (size), i;
1784 bool first_probe = true;
1785
1786 /* Adjust SP and probe at PROBE_INTERVAL + N * PROBE_INTERVAL for
1787 values of N from 1 until it exceeds SIZE. If only one probe is
1788 needed, this will not generate any code. Then adjust and probe
1789 to PROBE_INTERVAL + SIZE. */
1790 for (i = PROBE_INTERVAL; i < isize; i += PROBE_INTERVAL)
1791 {
1792 if (first_probe)
1793 {
1794 anti_adjust_stack (GEN_INT (2 * PROBE_INTERVAL + dope));
1795 first_probe = false;
1796 }
1797 else
1798 anti_adjust_stack (GEN_INT (PROBE_INTERVAL));
1799 emit_stack_probe (stack_pointer_rtx);
1800 }
1801
1802 if (first_probe)
1803 anti_adjust_stack (plus_constant (Pmode, size, PROBE_INTERVAL + dope));
1804 else
1805 anti_adjust_stack (plus_constant (Pmode, size, PROBE_INTERVAL - i));
1806 emit_stack_probe (stack_pointer_rtx);
1807 }
1808
1809 /* In the variable case, do the same as above, but in a loop. Note that we
1810 must be extra careful with variables wrapping around because we might be
1811 at the very top (or the very bottom) of the address space and we have to
1812 be able to handle this case properly; in particular, we use an equality
1813 test for the loop condition. */
1814 else
1815 {
1816 rtx rounded_size, rounded_size_op, last_addr, temp;
1817 rtx_code_label *loop_lab = gen_label_rtx ();
1818 rtx_code_label *end_lab = gen_label_rtx ();
1819
1820
1821 /* Step 1: round SIZE to the previous multiple of the interval. */
1822
1823 /* ROUNDED_SIZE = SIZE & -PROBE_INTERVAL */
1824 rounded_size
1825 = simplify_gen_binary (AND, Pmode, size,
1826 gen_int_mode (-PROBE_INTERVAL, Pmode));
1827 rounded_size_op = force_operand (rounded_size, NULL_RTX);
1828
1829
1830 /* Step 2: compute initial and final value of the loop counter. */
1831
1832 /* SP = SP_0 + PROBE_INTERVAL. */
1833 anti_adjust_stack (GEN_INT (PROBE_INTERVAL + dope));
1834
1835 /* LAST_ADDR = SP_0 + PROBE_INTERVAL + ROUNDED_SIZE. */
1836 last_addr = force_operand (gen_rtx_fmt_ee (STACK_GROW_OP, Pmode,
1837 stack_pointer_rtx,
1838 rounded_size_op), NULL_RTX);
1839
1840
1841 /* Step 3: the loop
1842
1843 while (SP != LAST_ADDR)
1844 {
1845 SP = SP + PROBE_INTERVAL
1846 probe at SP
1847 }
1848
1849 adjusts SP and probes at PROBE_INTERVAL + N * PROBE_INTERVAL for
1850 values of N from 1 until it is equal to ROUNDED_SIZE. */
1851
1852 emit_label (loop_lab);
1853
1854 /* Jump to END_LAB if SP == LAST_ADDR. */
1855 emit_cmp_and_jump_insns (stack_pointer_rtx, last_addr, EQ, NULL_RTX,
1856 Pmode, 1, end_lab);
1857
1858 /* SP = SP + PROBE_INTERVAL and probe at SP. */
1859 anti_adjust_stack (GEN_INT (PROBE_INTERVAL));
1860 emit_stack_probe (stack_pointer_rtx);
1861
1862 emit_jump (loop_lab);
1863
1864 emit_label (end_lab);
1865
1866
1867 /* Step 4: adjust SP and probe at PROBE_INTERVAL + SIZE if we cannot
1868 assert at compile-time that SIZE is equal to ROUNDED_SIZE. */
1869
1870 /* TEMP = SIZE - ROUNDED_SIZE. */
1871 temp = simplify_gen_binary (MINUS, Pmode, size, rounded_size);
1872 if (temp != const0_rtx)
1873 {
1874 /* Manual CSE if the difference is not known at compile-time. */
1875 if (GET_CODE (temp) != CONST_INT)
1876 temp = gen_rtx_MINUS (Pmode, size, rounded_size_op);
1877 anti_adjust_stack (temp);
1878 emit_stack_probe (stack_pointer_rtx);
1879 }
1880 }
1881
1882 /* Adjust back and account for the additional first interval. */
1883 if (adjust_back)
1884 adjust_stack (plus_constant (Pmode, size, PROBE_INTERVAL + dope));
1885 else
1886 adjust_stack (GEN_INT (PROBE_INTERVAL + dope));
1887 }
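/* Illustrative sketch, not part of the original source.  With
   PROBE_INTERVAL = 4096, UNITS_PER_WORD = 8 (so the dope is 32 bytes)
   and a constant SIZE of 8192, the code above first pushes
   2*4096 + 32 bytes and probes, then pushes the remaining 4096 bytes
   and probes again, for a total of SIZE + PROBE_INTERVAL + dope; the
   final adjust_stack call pops either that whole amount (ADJUST_BACK)
   or just PROBE_INTERVAL + dope, leaving SIZE bytes allocated.  */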
1888
1889 /* Return an rtx representing the register or memory location
1890 in which a scalar value of data type VALTYPE
1891 was returned by a function call to function FUNC.
1892 FUNC is a FUNCTION_DECL, FNTYPE a FUNCTION_TYPE node if the precise
1893 function is known, otherwise 0.
1894 OUTGOING is 1 if on a machine with register windows this function
1895 should return the register in which the function will put its result
1896 and 0 otherwise. */
1897
1898 rtx
1899 hard_function_value (const_tree valtype, const_tree func, const_tree fntype,
1900 int outgoing ATTRIBUTE_UNUSED)
1901 {
1902 rtx val;
1903
1904 val = targetm.calls.function_value (valtype, func ? func : fntype, outgoing);
1905
1906 if (REG_P (val)
1907 && GET_MODE (val) == BLKmode)
1908 {
1909 unsigned HOST_WIDE_INT bytes = int_size_in_bytes (valtype);
1910 opt_scalar_int_mode tmpmode;
1911
1912 /* int_size_in_bytes can return -1. We don't need a check here
1913 since the value of bytes will then be large enough that no
1914 mode will match anyway. */
1915
1916 FOR_EACH_MODE_IN_CLASS (tmpmode, MODE_INT)
1917 {
1918 /* Have we found a large enough mode? */
1919 if (GET_MODE_SIZE (tmpmode.require ()) >= bytes)
1920 break;
1921 }
1922
1923 PUT_MODE (val, tmpmode.require ());
1924 }
1925 return val;
1926 }
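/* Illustrative sketch, not part of the original source.  If the target
   hook returns a BLKmode hard register for, say, a 6-byte struct, the
   loop above walks the integer modes from narrowest to widest and
   picks the first one whose size is at least 6 bytes (DImode on a
   typical 64-bit target), then re-modes the register accordingly.  */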
1927
1928 /* Return an rtx representing the register or memory location
1929 in which a scalar value of mode MODE was returned by a library call. */
1930
1931 rtx
1932 hard_libcall_value (machine_mode mode, rtx fun)
1933 {
1934 return targetm.calls.libcall_value (mode, fun);
1935 }
1936
1937 /* Look up the tree code for a given rtx code
1938 to provide the arithmetic operation for real_arithmetic.
1939 The function returns an int because the caller may not know
1940 what `enum tree_code' means. */
1941
1942 int
1943 rtx_to_tree_code (enum rtx_code code)
1944 {
1945 enum tree_code tcode;
1946
1947 switch (code)
1948 {
1949 case PLUS:
1950 tcode = PLUS_EXPR;
1951 break;
1952 case MINUS:
1953 tcode = MINUS_EXPR;
1954 break;
1955 case MULT:
1956 tcode = MULT_EXPR;
1957 break;
1958 case DIV:
1959 tcode = RDIV_EXPR;
1960 break;
1961 case SMIN:
1962 tcode = MIN_EXPR;
1963 break;
1964 case SMAX:
1965 tcode = MAX_EXPR;
1966 break;
1967 default:
1968 tcode = LAST_AND_UNUSED_TREE_CODE;
1969 break;
1970 }
1971 return ((int) tcode);
1972 }
1973
1974 #include "gt-explow.h"